| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-04 20:03:43 -0800 |
| committer | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-04 20:03:43 -0800 |
| commit | 70d68bd32041d22febb277038641d55c6ac7b57a (patch) | |
| tree | 6288fe675e36fc874ed284519f2d1d7a8e4c789e /include | |
| parent | 48ad999d6fe95727a27b9ec82e522398d05cd928 (diff) | |
v2.4.7.3 -> v2.4.7.4
- David Mosberger: IA64 update
- Geert Uytterhoeven: cleanup, new atyfb
- Marcelo Tosatti: zone aging fixes
- me, others: limit IO requests sanely
Diffstat (limited to 'include')
34 files changed, 533 insertions, 315 deletions
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index 75f742e89e2e..4bf8d607b906 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -25,9 +25,7 @@
 #define local_bh_enable()						\
 do {									\
 	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
-	unsigned long flags;						\
 									\
-	__save_flags(flags);						\
 	barrier();							\
 	if (!--*ptr)							\
 		__asm__ __volatile__ (					\
diff --git a/include/asm-ia64/a.out.h b/include/asm-ia64/a.out.h
index 7cc0a00ce0a6..25de011bc7f4 100644
--- a/include/asm-ia64/a.out.h
+++ b/include/asm-ia64/a.out.h
@@ -32,7 +32,7 @@ struct exec {
 #ifdef __KERNEL__
 # include <asm/page.h>
 # define STACK_TOP	(0x8000000000000000UL + (1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)
-# define IA64_RBS_BOT	(STACK_TOP - 0x80000000L)	/* bottom of register backing store */
+# define IA64_RBS_BOT	(STACK_TOP - 0x80000000L + PAGE_SIZE)	/* bottom of reg. backing store */
 #endif
 
 #endif /* _ASM_IA64_A_OUT_H */
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
index 775934cbdfb8..b11c49e9d415 100644
--- a/include/asm-ia64/acpi-ext.h
+++ b/include/asm-ia64/acpi-ext.h
@@ -5,12 +5,12 @@
  * Advanced Configuration and Power Infterface
  * Based on 'ACPI Specification 1.0b' Febryary 2, 1999
  * and 'IA-64 Extensions to the ACPI Specification' Rev 0.6
- * 
+ *
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  * Copyright (C) 2000 Intel Corp.
  * Copyright (C) 2000 J.I. Lee <jung-ik.lee@intel.com>
- *	ACPI 2.0 specification 
+ *	ACPI 2.0 specification
  */
 
 #include <linux/types.h>
@@ -147,6 +147,9 @@ typedef struct {
 	u32 flags;
 } acpi_madt_t;
 
+/* acpi 2.0 MADT flags */
+#define MADT_PCAT_COMPAT	(1<<0)
+
 /* acpi 2.0 MADT structure types */
 #define ACPI20_ENTRY_LOCAL_APIC		0
 #define ACPI20_ENTRY_IO_APIC		1
diff --git a/include/asm-ia64/acpikcfg.h b/include/asm-ia64/acpikcfg.h
index a1d35aa89a19..d79cbbe4db0a 100644
--- a/include/asm-ia64/acpikcfg.h
+++ b/include/asm-ia64/acpikcfg.h
@@ -1,6 +1,6 @@
-#include <linux/config.h>
+#ifndef _ASM_IA64_ACPIKCFG_H
+#define _ASM_IA64_ACPIKCFG_H
 
-#ifdef CONFIG_ACPI_KERNEL_CONFIG
 /*
  * acpikcfg.h - ACPI based Kernel Configuration Manager External Interfaces
  *
@@ -26,4 +26,5 @@ acpi_cf_print_pci_vectors (
 	int	num_pci_vectors
 	);
 #endif
-#endif /* CONFIG_ACPI_KERNEL_CONFIG */
+
+#endif /* _ASM_IA64_ACPIKCFG_H */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 5c5884635aff..6499193f5d82 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -2,24 +2,29 @@
 #define _ASM_IA64_BITOPS_H
 
 /*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 02/04/00 D. Mosberger	Require 64-bit alignment for bitops, per suggestion from davem
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #include <asm/system.h>
 
-/*
- * These operations need to be atomic.  The address must be (at least)
- * 32-bit aligned.  Note that there are driver (e.g., eepro100) which
- * use these operations to operate on hw-defined data-structures, so
- * we can't easily change these operations to force a bigger
- * alignment.
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are driver (e.g., eepro100) which use these operations to operate on
+ * hw-defined data-structures, so we can't easily change these operations to force a
+ * bigger alignment.
  *
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
-
 static __inline__ void
 set_bit (int nr, volatile void *addr)
 {
@@ -36,11 +41,37 @@ set_bit (int nr, volatile void *addr)
 	} while (cmpxchg_acq(m, old, new) != old);
 }
 
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	smp_mb()
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
 static __inline__ void
 clear_bit (int nr, volatile void *addr)
 {
@@ -57,6 +88,15 @@ clear_bit (int nr, volatile void *addr)
 	} while (cmpxchg_acq(m, old, new) != old);
 }
 
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
 static __inline__ void
 change_bit (int nr, volatile void *addr)
 {
@@ -73,6 +113,29 @@ change_bit (int nr, volatile void *addr)
 	} while (cmpxchg_acq(m, old, new) != old);
 }
 
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
 {
@@ -90,6 +153,34 @@ test_and_set_bit (int nr, volatile void *addr)
 	return (old & bit) != 0;
 }
 
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = (*p & m) != 0;
+
+	*p |= m;
+	return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
 {
@@ -107,6 +198,34 @@ test_and_clear_bit (int nr, volatile void *addr)
 	return (old & ~mask) != 0;
 }
 
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = *p & m;
+
+	*p &= ~m;
+	return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its new value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
 {
@@ -124,15 +243,33 @@ test_and_change_bit (int nr, volatile void *addr)
 	return (old & bit) != 0;
 }
 
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+	__u32 old, bit = (1 << (nr & 31));
+	__u32 *m = (__u32 *) addr + (nr >> 5);
+
+	old = *m;
+	*m = old ^ bit;
+	return (old & bit) != 0;
+}
+
 static __inline__ int
 test_bit (int nr, volatile void *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
+/**
+ * ffz - find the first zero bit in a memory region
+ * @x: The address to start the search at
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit, not
+ * the number of the byte containing a bit.  Undefined if no zero exists, so
+ * code should check against ~0UL first...
  */
 static inline unsigned long
 ffz (unsigned long x)
@@ -146,8 +283,8 @@ ffz (unsigned long x)
 #ifdef __KERNEL__
 
 /*
- * Find the most significant bit that is set (undefined if no bit is
- * set).
+ * find_last_zero_bit - find the last zero bit in a 64 bit quantity
+ * @x: The value to search
 */
 static inline unsigned long
 ia64_fls (unsigned long x)
@@ -160,9 +297,10 @@ ia64_fls (unsigned long x)
 }
 
 /*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
+ * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
+ * "int" values only and the result value is the bit number + 1.  ffs(0) is defined to
+ * return zero.
  */
 #define ffs(x)	__builtin_ffs(x)
diff --git a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h
index 70128e907b90..3157aaa68c01 100644
--- a/include/asm-ia64/efi.h
+++ b/include/asm-ia64/efi.h
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/time.h>
 #include <linux/types.h>
+#include <linux/proc_fs.h>
 
 #include <asm/page.h>
 #include <asm/system.h>
@@ -237,7 +238,7 @@ extern void efi_map_pal_code (void);
 extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timeval *tv);
 extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
-
+extern u64 efi_get_iobase (void);
 
 /*
  * Variable Attributes
@@ -246,4 +247,12 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
 #define EFI_VARIABLE_BOOTSERVICE_ACCESS	0x0000000000000002
 #define EFI_VARIABLE_RUNTIME_ACCESS	0x0000000000000004
 
+
+/*
+ * efi_dir is allocated in arch/ia64/kernel/efi.c.
+ */
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *efi_dir;
+#endif
+
 #endif /* _ASM_IA64_EFI_H */
diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h
index bbf8afcfdd44..8986f033c815 100644
--- a/include/asm-ia64/fpswa.h
+++ b/include/asm-ia64/fpswa.h
@@ -9,10 +9,6 @@
  * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
  */
 
-#if 1
-#define FPSWA_BUG
-#endif
-
 typedef struct {
 	/* 4 * 128 bits */
 	unsigned long fp_lp[4*2];
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
index 4befe78e0f0b..0328bd3f0d06 100644
--- a/include/asm-ia64/hardirq.h
+++ b/include/asm-ia64/hardirq.h
@@ -16,15 +16,15 @@
 /*
  * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
  */
-#define softirq_active(cpu)	(cpu_data[cpu].softirq.active)
-#define softirq_mask(cpu)	(cpu_data[cpu].softirq.mask)
-#define irq_count(cpu)		(cpu_data[cpu].irq_stat.f.irq_count)
-#define bh_count(cpu)		(cpu_data[cpu].irq_stat.f.bh_count)
+#define softirq_pending(cpu)	(cpu_data(cpu)->softirq_pending)
+#define ksoftirqd_task(cpu)	(cpu_data(cpu)->ksoftirqd)
+#define irq_count(cpu)		(cpu_data(cpu)->irq_stat.f.irq_count)
+#define bh_count(cpu)		(cpu_data(cpu)->irq_stat.f.bh_count)
 #define syscall_count(cpu)	/* unused on IA-64 */
 #define nmi_count(cpu)		0
 
-#define local_softirq_active()	(local_cpu_data->softirq.active)
-#define local_softirq_mask()	(local_cpu_data->softirq.mask)
+#define local_softirq_pending()	(local_cpu_data->softirq_pending)
+#define local_ksoftirqd_task()	(local_cpu_data->ksoftirqd)
 #define local_irq_count()	(local_cpu_data->irq_stat.f.irq_count)
 #define local_bh_count()	(local_cpu_data->irq_stat.f.bh_count)
 #define local_syscall_count()	/* unused on IA-64 */
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index c0162769842c..75c82bc8abf5 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -49,6 +49,7 @@ typedef u8 ia64_vector;
 #define IA64_PERFMON_VECTOR		0xee	/* performanc monitor interrupt vector */
 #define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
 #define IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
 #define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */
 
 /* IA64 inter-cpu interrupt related definitions */
@@ -69,7 +70,7 @@ extern __u8 isa_irq_to_vector_map[16];
 
 extern unsigned long ipi_base_addr;
 
-extern struct hw_interrupt_type irq_type_ia64_sapic;	/* CPU-internal interrupt controller */
+extern struct hw_interrupt_type irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
 extern int ia64_alloc_irq (void);	/* allocate a free irq */
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index a0b2c4b78e02..7b2f88f8f80d 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -108,7 +108,7 @@ typedef struct {
 } sigset32_t;
 
 struct sigaction32 {
-       unsigned int sa_handler;		/* Really a pointer, but need to deal 
+       unsigned int sa_handler;		/* Really a pointer, but need to deal
 						   with 32 bits */
        unsigned int sa_flags;
        unsigned int sa_restorer;	/* Another 32 bit pointer */
@@ -118,7 +118,7 @@
 typedef unsigned int old_sigset32_t;	/* at least 32 bits */
 
 struct old_sigaction32 {
-       unsigned int sa_handler;		/* Really a pointer, but need to deal 
+       unsigned int sa_handler;		/* Really a pointer, but need to deal
 						   with 32 bits */
        old_sigset32_t sa_mask;		/* A 32 bit mask */
        unsigned int sa_flags;
@@ -133,7 +133,7 @@ typedef struct sigaltstack_ia32 {
 
 struct ucontext_ia32 {
 	unsigned int	  uc_flags;
-	unsigned int 	  uc_link;
+	unsigned int	  uc_link;
 	stack_ia32_t	  uc_stack;
 	struct sigcontext_ia32 uc_mcontext;
 	sigset_t	  uc_sigmask;	/* mask last for extensibility */
@@ -252,6 +252,15 @@ typedef struct siginfo32 {
 #define ELF_ARCH	EM_386
 
 #define IA32_PAGE_OFFSET	0xc0000000
+#define IA32_STACK_TOP		((IA32_PAGE_OFFSET/3) * 2)
+
+/*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
+ * access them.
+ */
+#define IA32_GDT_OFFSET		(IA32_PAGE_OFFSET)
+#define IA32_TSS_OFFSET		(IA32_PAGE_OFFSET + PAGE_SIZE)
+#define IA32_LDT_OFFSET		(IA32_PAGE_OFFSET + 2*PAGE_SIZE)
 
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	IA32_PAGE_SIZE
@@ -287,7 +296,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports.  */
-#define ELF_HWCAP 	0
+#define ELF_HWCAP	0
 
 /* This macro yields a string that ld.so will use to load
    implementation specific libraries for optimization.  Not terribly
@@ -304,61 +313,64 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 /*
  * IA-32 ELF specific definitions for IA-64.
  */
- 
+
 #define __USER_CS      0x23
 #define __USER_DS      0x2B
 
-#define SEG_LIM		32
-#define SEG_TYPE	52
-#define SEG_SYS		56
-#define SEG_DPL		57
-#define SEG_P		59
-#define SEG_DB		62
-#define SEG_G		63
-
 #define FIRST_TSS_ENTRY 6
 #define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
 #define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
 #define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
 
-#define IA64_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, segdb, granularity) \
-	((base) |			\
-	 (limit << SEG_LIM) |		\
-	 (segtype << SEG_TYPE) |	\
-	 (nonsysseg << SEG_SYS) |	\
-	 (dpl << SEG_DPL) |		\
-	 (segpresent << SEG_P) |	\
-	 (segdb << SEG_DB) |		\
-	 (granularity << SEG_G))
-
-#define IA32_SEG_BASE		16
-#define IA32_SEG_TYPE		40
-#define IA32_SEG_SYS		44
-#define IA32_SEG_DPL		45
-#define IA32_SEG_P		47
-#define IA32_SEG_HIGH_LIMIT	48
-#define IA32_SEG_AVL		52
-#define IA32_SEG_DB		54
-#define IA32_SEG_G		55
-#define IA32_SEG_HIGH_BASE	56
-
-#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, granularity) \
-	((limit & 0xFFFF) |	\
-	 (base & 0xFFFFFF << IA32_SEG_BASE) |	\
-	 (segtype << IA32_SEG_TYPE) |	\
-	 (nonsysseg << IA32_SEG_SYS) |	\
-	 (dpl << IA32_SEG_DPL) |	\
-	 (segpresent << IA32_SEG_P) |	\
-	 (((limit >> 16) & 0xF) << IA32_SEG_HIGH_LIMIT) |	\
-	 (avl << IA32_SEG_AVL) |	\
-	 (segdb << IA32_SEG_DB) |	\
-	 (granularity << IA32_SEG_G) |	\
-	 (((base >> 24) & 0xFF) << IA32_SEG_HIGH_BASE))
-
-#define IA32_IOBASE	0x2000000000000000 /* Virtual address for I/O space */
-
-#define IA32_CR0	0x80000001	/* Enable PG and PE bits */
-#define IA32_CR4	0		/* No architectural extensions */
+#define IA32_SEG_BASE		16
+#define IA32_SEG_TYPE		40
+#define IA32_SEG_SYS		44
+#define IA32_SEG_DPL		45
+#define IA32_SEG_P		47
+#define IA32_SEG_HIGH_LIMIT	48
+#define IA32_SEG_AVL		52
+#define IA32_SEG_DB		54
+#define IA32_SEG_G		55
+#define IA32_SEG_HIGH_BASE	56
+
+#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
+	 (((limit) & 0xffff)								\
+	  | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE)			\
+	  | ((unsigned long) (segtype) << IA32_SEG_TYPE)				\
+	  | ((unsigned long) (nonsysseg) << IA32_SEG_SYS)				\
+	  | ((unsigned long) (dpl) << IA32_SEG_DPL)					\
+	  | ((unsigned long) (segpresent) << IA32_SEG_P)				\
+	  | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT)		\
+	  | ((unsigned long) (avl) << IA32_SEG_AVL)					\
+	  | ((unsigned long) (segdb) << IA32_SEG_DB)					\
+	  | ((unsigned long) (gran) << IA32_SEG_G)					\
+	  | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
+
+#define SEG_LIM		32
+#define SEG_TYPE	52
+#define SEG_SYS		56
+#define SEG_DPL		57
+#define SEG_P		59
+#define SEG_AVL		60
+#define SEG_DB		62
+#define SEG_G		63
+
+/* Unscramble an IA-32 segment descriptor into the IA-64 format.  */
+#define IA32_SEG_UNSCRAMBLE(sd)									\
+	(  (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24)\
+	 | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM)	\
+	 | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE)					\
+	 | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS)						\
+	 | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL)						\
+	 | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P)						\
+	 | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL)						\
+	 | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB)						\
+	 | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
+
+#define IA32_IOBASE	0x2000000000000000 /* Virtual address for I/O space */
+
+#define IA32_CR0	0x80000001	/* Enable PG and PE bits */
+#define IA32_CR4	0x600		/* MMXEX and FXSR on */
 
 /*
  * IA32 floating point control registers starting values
@@ -384,6 +396,25 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 	regs->r12 = new_sp;							\
 } while (0)
 
+/*
+ * Local Descriptor Table (LDT) related declarations.
+ */
+
+#define IA32_LDT_ENTRIES	8192		/* Maximum number of LDT entries supported. */
+#define IA32_LDT_ENTRY_SIZE	8		/* The size of each LDT entry. */
+
+struct ia32_modify_ldt_ldt_s {
+	unsigned int entry_number;
+	unsigned int base_addr;
+	unsigned int limit;
+	unsigned int seg_32bit:1;
+	unsigned int contents:2;
+	unsigned int read_exec_only:1;
+	unsigned int limit_in_pages:1;
+	unsigned int seg_not_present:1;
+	unsigned int useable:1;
+};
+
 extern void ia32_gdt_init (void);
 extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
 			      sigset_t *set, struct pt_regs *regs);
@@ -392,5 +423,5 @@ extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
 
 #endif /* !CONFIG_IA32_SUPPORT */
- 
+
 #endif /* _ASM_IA64_IA32_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 480b95590a66..ae5b7781a746 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -333,7 +333,7 @@ __writeq (unsigned long val, void *addr)
 #define readb(a)	__readb((void *)(a))
 #define readw(a)	__readw((void *)(a))
 #define readl(a)	__readl((void *)(a))
-#define readq(a)	__readqq((void *)(a))
+#define readq(a)	__readq((void *)(a))
 #define __raw_readb	readb
 #define __raw_readw	readw
 #define __raw_readl	readl
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 90ab8aacfca4..f3a199013c17 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -51,7 +51,8 @@
 
 #ifndef __ASSEMBLY__
 
-extern void __init iosapic_init (unsigned long address, unsigned int base_irq);
+extern void __init iosapic_init (unsigned long address, unsigned int base_irq,
+				 int pcat_compat);
 extern void iosapic_register_legacy_irq (unsigned long irq, unsigned long pin,
 					 unsigned long polarity, unsigned long trigger);
 extern void iosapic_pci_fixup (int);
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index bade650e4445..b1d32e556c97 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -1,5 +1,5 @@
 /*
- * File:	mca_asm.h 
+ * File:	mca_asm.h
  *
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
@@ -16,23 +16,23 @@
 #define PSR_RT		27
 #define PSR_IT		36
 #define PSR_BN		44
- 
+
 /*
  * This macro converts a instruction virtual address to a physical address
  * Right now for simulation purposes the virtual addresses are
  * direct mapped to physical addresses.
- *	1. Lop off bits 61 thru 63 in the virtual address 
+ *	1. Lop off bits 61 thru 63 in the virtual address
  */
 #define INST_VA_TO_PA(addr)							\
-	dep	addr	= 0, addr, 61, 3; 
+	dep	addr	= 0, addr, 61, 3;
 /*
  * This macro converts a data virtual address to a physical address
  * Right now for simulation purposes the virtual addresses are
  * direct mapped to physical addresses.
- *	1. Lop off bits 61 thru 63 in the virtual address 
+ *	1. Lop off bits 61 thru 63 in the virtual address
 */
 #define DATA_VA_TO_PA(addr)							\
-	dep	addr	= 0, addr, 61, 3; 
+	dep	addr	= 0, addr, 61, 3;
 /*
  * This macro converts a data physical address to a virtual address
  * Right now for simulation purposes the virtual addresses are
@@ -40,7 +40,7 @@
  *	1. Put 0x7 in bits 61 thru 63.
  */
 #define DATA_PA_TO_VA(addr,temp)						\
-	mov	temp	= 0x7 ;							\
+	mov	temp	= 0x7 ;;						\
 	dep	addr	= temp, addr, 61, 3;
 
 /*
@@ -48,11 +48,11 @@
  * and starts execution in physical mode with all the address
  * translations turned off.
  *	1. Save the current psr
- *	2. Make sure that all the upper 32 bits are off 
+ *	2. Make sure that all the upper 32 bits are off
  *
  *	3. Clear the interrupt enable and interrupt state collection bits
  *	   in the psr before updating the ipsr and iip.
- * 
+ *
  *	4. Turn off the instruction, data and rse translation bits of the psr
  *	   and store the new value into ipsr
  *	   Also make sure that the interrupts are disabled.
@@ -71,7 +71,7 @@
 	mov	old_psr = psr;							\
 	;;									\
 	dep	old_psr = 0, old_psr, 32, 32;					\
-										\
+										\
 	mov	ar.rsc = 0 ;							\
 	;;									\
 	mov	temp2 = ar.bspstore;						\
@@ -86,7 +86,7 @@
 	mov	temp1 = psr;							\
 	mov	temp2 = psr;							\
 	;;									\
-										\
+										\
 	dep	temp2 = 0, temp2, PSR_IC, 2;					\
 	;;									\
 	mov	psr.l = temp2;							\
@@ -94,11 +94,11 @@
 	srlz.d;									\
 	dep	temp1 = 0, temp1, 32, 32;					\
 	;;									\
-	dep	temp1 = 0, temp1, PSR_IT, 1; 					\
+	dep	temp1 = 0, temp1, PSR_IT, 1;					\
 	;;									\
-	dep	temp1 = 0, temp1, PSR_DT, 1; 					\
+	dep	temp1 = 0, temp1, PSR_DT, 1;					\
 	;;									\
-	dep	temp1 = 0, temp1, PSR_RT, 1; 					\
+	dep	temp1 = 0, temp1, PSR_RT, 1;					\
 	;;									\
 	dep	temp1 = 0, temp1, PSR_I, 1;					\
 	;;									\
@@ -125,72 +125,73 @@
  * This macro jumps to the instruction at the given virtual address
  * and starts execution in virtual mode with all the address
  * translations turned on.
- *	1. Get the old saved psr
- *
- *	2. Clear the interrupt enable and interrupt state collection bits
+ *	1. Get the old saved psr
+ *
+ *	2. Clear the interrupt enable and interrupt state collection bits
  *	   in the current psr.
- * 
+ *
  *	3. Set the instruction translation bit back in the old psr
  *	   Note we have to do this since we are right now saving only the
  *	   lower 32-bits of old psr.(Also the old psr has the data and
  *	   rse translation bits on)
- * 
+ *
  *	4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
 *
- *	5. Set iip to the virtual address of the next instruction bundle. 
+ *	5. Set iip to the virtual address of the next instruction bundle.
 *
  *	6. Do an rfi to move ipsr to psr and iip to ip.
  */
-#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
-	mov	temp2 = psr; \
-	;; \
-	dep	temp2 = 0, temp2, PSR_IC, 2; \
-	;; \
-	mov	psr.l = temp2; \
-	mov	ar.rsc = 0; \
-	;; \
-	srlz.d; \
-	mov	temp2 = ar.bspstore; \
-	;; \
-	DATA_PA_TO_VA(temp2,temp1); \
-	;; \
-	mov	temp1 = ar.rnat; \
-	;; \
-	mov	ar.bspstore = temp2; \
-	;; \
-	mov	ar.rnat = temp1; \
-	;; \
-	mov	temp1 = old_psr; \
-	;; \
-	mov	temp2 = 1 ; \
-	dep	temp1 = temp2, temp1, PSR_I, 1; \
-	;; \
-	dep	temp1 = temp2, temp1, PSR_IC, 1; \
-	;; \
-	dep	temp1 = temp2, temp1, PSR_IT, 1; \
-	;; \
-	dep	temp1 = temp2, temp1, PSR_DT, 1; \
-	;; \
-	dep	temp1 = temp2, temp1, PSR_RT, 1; \
-	;; \
-	dep	temp1 = temp2, temp1, PSR_BN, 1; \
-	;; \
-	\
-	mov	cr.ipsr = temp1; \
-	movl	temp2 = start_addr; \
-	;; \
-	mov	cr.iip = temp2; \
-	DATA_PA_TO_VA(sp, temp1); \
-	DATA_PA_TO_VA(gp, temp2); \
-	;; \
-	nop	1; \
-	nop	2; \
-	nop	1; \
-	rfi; \
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
+	mov	temp2 = psr;					\
+	;;							\
+	dep	temp2 = 0, temp2, PSR_IC, 2;			\
+	;;							\
+	mov	psr.l = temp2;					\
+	mov	ar.rsc = 0;					\
+	;;							\
+	srlz.d;							\
+	mov	temp2 = ar.bspstore;				\
+	;;							\
+	DATA_PA_TO_VA(temp2,temp1);				\
+	;;							\
+	mov	temp1 = ar.rnat;				\
+	;;							\
+	mov	ar.bspstore = temp2;				\
+	;;							\
+	mov	ar.rnat = temp1;				\
+	;;							\
+	mov	temp1 = old_psr;				\
+	;;							\
+	mov	temp2 = 1					\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_I, 1;			\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_IC, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_IT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_DT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_RT, 1;		\
+	;;							\
+	dep	temp1 = temp2, temp1, PSR_BN, 1;		\
+	;;							\
+								\
+	mov	cr.ipsr = temp1;				\
+	movl	temp2 = start_addr;				\
+	;;							\
+	mov	cr.iip = temp2;					\
+	DATA_PA_TO_VA(sp, temp1);				\
+	DATA_PA_TO_VA(gp, temp2);				\
+	;;							\
+	nop	1;						\
+	nop	2;						\
+	nop	1;						\
+	rfi;							\
 	;;
 
@@ -198,15 +199,15 @@
-/* 
+/*
  * The following offsets capture the order in which the
  * RSE related registers from the old context are
  * saved onto the new stack frame.
  *	+-----------------------+
  *	|NDIRTY [BSP - BSPSTORE]|
  *	+-----------------------+
- *	|	RNAT		| 
+ *	|	RNAT		|
  *	+-----------------------+
- *	|	BSPSTORE	| 
+ *	|	BSPSTORE	|
  *	+-----------------------+
- *	|	IFS		| 
+ *	|	IFS		|
  *	+-----------------------+
- *	|	PFS		| 
+ *	|	PFS		|
  *	+-----------------------+
- *	|	RSC		| 
+ *	|	RSC		|
  *	+-----------------------+ <-------- Bottom of new stack frame
  */
 #define rse_rsc_offset		0
@@ -229,23 +230,23 @@
  *	8. Read and save the new BSP to calculate the #dirty registers
  *	NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
  */
-#define rse_switch_context(temp,p_stackframe,p_bspstore) \
-	;; \
-	mov	temp=ar.rsc;; \
-	st8	[p_stackframe]=temp,8;; \
-	mov	temp=ar.pfs;; \
-	st8	[p_stackframe]=temp,8; \
-	cover ;; \
-	mov	temp=cr.ifs;; \
-	st8	[p_stackframe]=temp,8;; \
-	mov	temp=ar.bspstore;; \
-	st8	[p_stackframe]=temp,8;; \
-	mov	temp=ar.rnat;; \
-	st8	[p_stackframe]=temp,8; \
-	mov	ar.bspstore=p_bspstore;; \
-	mov	temp=ar.bsp;; \
-	sub	temp=temp,p_bspstore;; \
-	st8	[p_stackframe]=temp,8
+#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
+	;;							\
+	mov	temp=ar.rsc;;					\
+	st8	[p_stackframe]=temp,8;;				\
+	mov	temp=ar.pfs;;					\
+	st8	[p_stackframe]=temp,8;				\
+	cover ;;						\
+	mov	temp=cr.ifs;;					\
+	st8	[p_stackframe]=temp,8;;				\
+	mov	temp=ar.bspstore;;				\
+	st8	[p_stackframe]=temp,8;;				\
+	mov	temp=ar.rnat;;					\
+	st8	[p_stackframe]=temp,8;				\
+	mov	ar.bspstore=p_bspstore;;			\
+	mov	temp=ar.bsp;;					\
+	sub	temp=temp,p_bspstore;;				\
+	st8	[p_stackframe]=temp,8
 
 /*
  * rse_return_context
@@ -253,7 +254,7 @@
  *	2. Store the number of dirty registers RSC.loadrs field
  *	3. Issue a loadrs to insure that any registers from the interrupted
  *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers 
+ *	   back into the stacked registers
  *	4. Restore BSPSTORE
  *	5. Restore RNAT
  *	6. Restore PFS
@@ -261,44 +262,44 @@
  *	8. Restore RSC
  *	9. Issue an RFI
  */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe) \
-	;; \
-	alloc	temp=ar.pfs,0,0,0,0; \
-	add	p_stackframe=rse_ndirty_offset,p_stackframe;; \
-	ld8	temp=[p_stackframe];; \
-	shl	temp=temp,16;; \
-	mov	ar.rsc=temp;; \
-	loadrs;; \
-	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];; \
-	mov	ar.bspstore=temp;; \
-	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];; \
-	mov	ar.rnat=temp;; \
-	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \
-	ld8	temp=[p_stackframe];; \
-	mov	ar.pfs=temp; \
-	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \
-	ld8	temp=[p_stackframe];; \
-	mov	cr.ifs=temp; \
-	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \
-	ld8	temp=[p_stackframe];; \
-	mov	ar.rsc=temp ; \
-	add	p_stackframe=-rse_rsc_offset,p_stackframe; \
-	mov	temp=cr.ipsr;; \
-	st8	[p_stackframe]=temp,8; \
-	mov	temp=cr.iip;; \
-	st8	[p_stackframe]=temp,-8; \
-	mov	temp=psr;; \
-	or	temp=temp,psr_mask_reg;; \
-	mov	cr.ipsr=temp;; \
-	mov	temp=ip;; \
-	add	temp=0x30,temp;; \
-	mov	cr.iip=temp;; \
-	rfi;; \
-	ld8	temp=[p_stackframe],8;; \
-	mov	cr.ipsr=temp;; \
-	ld8	temp=[p_stackframe];; \
-	mov	cr.iip=temp
+#define rse_return_context(psr_mask_reg,temp,p_stackframe)		\
+	;;								\
+	alloc	temp=ar.pfs,0,0,0,0;					\
+	add	p_stackframe=rse_ndirty_offset,p_stackframe;;		\
+	ld8	temp=[p_stackframe];;					\
+	shl	temp=temp,16;;						\
+	mov	ar.rsc=temp;;						\
+	loadrs;;							\
+	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
+	ld8	temp=[p_stackframe];;					\
+	mov	ar.bspstore=temp;;					\
+	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
+	ld8	temp=[p_stackframe];;					\
+	mov	ar.rnat=temp;;						\
+	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
+	ld8	temp=[p_stackframe];;					\
+	mov	ar.pfs=temp;						\
+	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
+	ld8	temp=[p_stackframe];;					\
+	mov	cr.ifs=temp;						\
+	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
+	ld8	temp=[p_stackframe];;					\
+	mov	ar.rsc=temp ;						\
+	add	p_stackframe=-rse_rsc_offset,p_stackframe;		\
+	mov	temp=cr.ipsr;;						\
+	st8	[p_stackframe]=temp,8;					\
+	mov	temp=cr.iip;;						\
+	st8	[p_stackframe]=temp,-8;					\
+	mov	temp=psr;;						\
+	or	temp=temp,psr_mask_reg;;				\
+	mov	cr.ipsr=temp;;						\
+	mov	temp=ip;;						\
+	add	temp=0x30,temp;;					\
+	mov	cr.iip=temp;;						\
+	rfi;;								\
+	ld8	temp=[p_stackframe],8;;					\
+	mov	cr.ipsr=temp;;						\
+	ld8	temp=[p_stackframe];;					\
+	mov	cr.iip=temp
 
 #endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
index 8b2445c9cda3..82cb9553aa64 100644
--- a/include/asm-ia64/offsets.h
+++ b/include/asm-ia64/offsets.h
@@ -1,21 +1,19 @@
 #ifndef _ASM_IA64_OFFSETS_H
 #define _ASM_IA64_OFFSETS_H
-
 /*
  * DO NOT MODIFY
  *
- * This file was generated by arch/ia64/tools/print_offsets.
+ * This file was generated by arch/ia64/tools/print_offsets.awk.
  *
  */
-
-#define PT_PTRACED_BIT			0
-#define PT_TRACESYS_BIT			1
-
+#define PT_PTRACED_BIT			0
+#define PT_TRACESYS_BIT			1
 #define IA64_TASK_SIZE			3904	/* 0xf40 */
 #define IA64_PT_REGS_SIZE		400	/* 0x190 */
 #define IA64_SWITCH_STACK_SIZE		560	/* 0x230 */
 #define IA64_SIGINFO_SIZE		128	/* 0x80 */
 #define IA64_CPU_SIZE			16384	/* 0x4000 */
+#define SIGFRAME_SIZE			2832	/* 0xb10 */
 #define UNW_FRAME_INFO_SIZE		448	/* 0x1c0 */
 
 #define IA64_TASK_PTRACE_OFFSET		48	/* 0x30 */
@@ -24,8 +22,8 @@
 #define IA64_TASK_PROCESSOR_OFFSET	100	/* 0x64 */
 #define IA64_TASK_THREAD_OFFSET		1456	/* 0x5b0 */
 #define IA64_TASK_THREAD_KSP_OFFSET	1456	/* 0x5b0 */
-#define IA64_TASK_THREAD_SIGMASK_OFFSET	3752	/* 0xea8 */
-#define IA64_TASK_PFM_NOTIFY_OFFSET	3648	/* 0xe40 */
+#define IA64_TASK_THREAD_SIGMASK_OFFSET	1568	/* 0x620 */
+#define IA64_TASK_PFM_NOTIFY_OFFSET	2088	/* 0x828 */
 #define IA64_TASK_PID_OFFSET		196	/* 0xc4 */
 #define IA64_TASK_MM_OFFSET		88	/* 0x58 */
 #define IA64_PT_REGS_CR_IPSR_OFFSET	0	/* 0x0 */
@@ -75,7 +73,7 @@
 #define IA64_PT_REGS_F8_OFFSET		368	/* 0x170 */
 #define IA64_PT_REGS_F9_OFFSET		384	/* 0x180 */
 #define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0	/* 0x0 */
-#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8	/* 0x8 */
+#define IA64_SWITCH_STACK_AR_FPSR_OFFSET	8 /* 0x8 */
 #define IA64_SWITCH_STACK_F2_OFFSET	16	/* 0x10 */
 #define IA64_SWITCH_STACK_F3_OFFSET	32	/* 0x20 */
 #define IA64_SWITCH_STACK_F4_OFFSET	48	/* 0x30 */
@@ -114,21 +112,30 @@
 #define IA64_SWITCH_STACK_B5_OFFSET	504	/* 0x1f8 */
 #define IA64_SWITCH_STACK_AR_PFS_OFFSET	512	/* 0x200 */
 #define IA64_SWITCH_STACK_AR_LC_OFFSET	520	/* 0x208 */
-#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528	/* 0x210 */
-#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536	/* 0x218 */
+#define IA64_SWITCH_STACK_AR_UNAT_OFFSET	528 /* 0x210 */
+#define IA64_SWITCH_STACK_AR_RNAT_OFFSET	536 /* 0x218 */
 #define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544 /* 0x220 */
 #define IA64_SWITCH_STACK_PR_OFFSET	552	/* 0x228 */
 #define IA64_SIGCONTEXT_AR_BSP_OFFSET	72	/* 0x48 */
+#define IA64_SIGCONTEXT_AR_FPSR_OFFSET	104	/* 0x68 */
 #define IA64_SIGCONTEXT_AR_RNAT_OFFSET	80	/* 0x50 */
-#define IA64_SIGCONTEXT_FLAGS_OFFSET	0	/* 0x0 */
+#define IA64_SIGCONTEXT_AR_UNAT_OFFSET	96	/* 0x60 */
+#define IA64_SIGCONTEXT_B0_OFFSET	136	/* 0x88 */
 #define IA64_SIGCONTEXT_CFM_OFFSET	48	/* 0x30 */
+#define IA64_SIGCONTEXT_FLAGS_OFFSET	0	/* 0x0 */
 #define IA64_SIGCONTEXT_FR6_OFFSET	560	/* 0x230 */
-#define IA64_CLONE_VFORK		16384	/* 0x4000 */
+#define IA64_SIGCONTEXT_PR_OFFSET	128	/* 0x80 */
+#define IA64_SIGCONTEXT_R12_OFFSET	296	/* 0x128 */
+#define IA64_SIGFRAME_ARG0_OFFSET	0	/* 0x0 */
+#define IA64_SIGFRAME_ARG1_OFFSET	8	/* 0x8 */
+#define IA64_SIGFRAME_ARG2_OFFSET	16	/* 0x10 */
+#define IA64_SIGFRAME_RBS_BASE_OFFSET	24	/* 0x18 */
+#define IA64_SIGFRAME_HANDLER_OFFSET	32	/* 0x20 */
+#define IA64_SIGFRAME_SIGCONTEXT_OFFSET	176	/* 0xb0 */
+#define IA64_CLONE_VFORK		16384	/* 0x4000 */
 #define IA64_CLONE_VM			256	/* 0x100 */
-#define IA64_CPU_IRQ_COUNT_OFFSET	8	/* 0x8 */
-#define IA64_CPU_BH_COUNT_OFFSET	12	/* 0xc */
-#define IA64_CPU_SOFTIRQ_ACTIVE_OFFSET	0	/* 0x0 */
-#define IA64_CPU_SOFTIRQ_MASK_OFFSET	4	/* 0x4 */
-#define IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET 16 /* 0x10 */
+#define IA64_CPU_IRQ_COUNT_OFFSET	0	/* 0x0 */
+#define IA64_CPU_BH_COUNT_OFFSET	4	/* 0x4 */
+#define IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET 12 /* 0xc */
 
 #endif /* _ASM_IA64_OFFSETS_H */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 37cfd0b72f78..df0fd548653b 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -34,7 +34,7 @@
 #define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
 
 static inline pgd_t*
-pgd_alloc_one_fast (void)
+pgd_alloc_one_fast (struct mm_struct *mm)
 {
 	unsigned long *ret = pgd_quicklist;
 
@@ -51,7 +51,7 @@ static inline pgd_t*
 pgd_alloc (struct mm_struct *mm)
 {
 	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
-	pgd_t *pgd = pgd_alloc_one_fast();
+	pgd_t *pgd = pgd_alloc_one_fast(mm);
 
 	if (__builtin_expect(pgd == NULL, 0)) {
 		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 5d844a25dc8f..51942aeee818 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -17,6 +17,7 @@
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <asm/system.h>
 #include <asm/types.h>
 
 #define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */
@@ -125,7 +126,7 @@
 
 #include <asm/bitops.h>
 #include <asm/mmu_context.h>
-#include <asm/system.h>
+#include <asm/processor.h>
 
 /*
  * Next come the mappings that determine how mmap() protection bits
@@ -443,7 +444,7 @@ extern void paging_init (void);
 
 #define SWP_TYPE(entry)			(((entry).val >> 1) & 0xff)
 #define SWP_OFFSET(entry)		(((entry).val << 1) >> 10)
-#define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 9) })
+#define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
 #define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define swp_entry_to_pte(x)		((pte_t) { (x).val })
 
@@ -464,4 +465,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 
 # endif /* !__ASSEMBLY__ */
 
+/*
+ * Identity-mapped regions use a large page size.  KERNEL_PG_NUM is the
+ * number of the (large) page frame that mapps the kernel.
+ */
+#define KERNEL_PG_SHIFT		_PAGE_SIZE_64M
+#define KERNEL_PG_SIZE		(1 << KERNEL_PG_SHIFT)
+#define KERNEL_PG_NUM		((KERNEL_START - PAGE_OFFSET) / KERNEL_PG_SIZE)
+
 #endif /* _ASM_IA64_PGTABLE_H */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 2e10c77c7bf6..bf8411d0fcb5 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -235,11 +235,7 @@ struct ia64_psr {
  * state comes earlier:
  */
 struct cpuinfo_ia64 {
-	/* irq_stat and softirq should be 64-bit aligned */
-	struct {
-		__u32 active;
-		__u32 mask;
-	} softirq;
+	/* irq_stat must be 64-bit aligned */
 	union {
 		struct {
 			__u32 irq_count;
@@ -247,8 +243,8 @@ struct cpuinfo_ia64 {
 		} f;
 		__u64 irq_and_bh_counts;
 	} irq_stat;
+	__u32 softirq_pending;
 	__u32 phys_stacked_size_p8;	/* size of physical stacked registers + 8 */
-	__u32 pad0;
 	__u64 itm_delta;	/* # of clock cycles between clock ticks */
 	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
 	__u64 *pgd_quick;
@@ -273,6 +269,7 @@ struct cpuinfo_ia64 {
 	__u64 ptce_base;
 	__u32 ptce_count[2];
 	__u32 ptce_stride[2];
+	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
 #ifdef CONFIG_SMP
 	__u64 loops_per_jiffy;
 	__u64 ipi_count;
@@ -280,6 +277,9 @@ struct cpuinfo_ia64 {
 	__u64 prof_multiplier;
 	__u64 ipi_operation;
 #endif
+#ifdef CONFIG_NUMA
+	struct cpuinfo_ia64 *cpu_data[NR_CPUS];
+#endif
 } __attribute__ ((aligned (PAGE_SIZE))) ;
 
 /*
@@ -288,7 +288,22 @@ struct cpuinfo_ia64 {
 */
 #define local_cpu_data		((struct cpuinfo_ia64 *) PERCPU_ADDR)
 
-extern struct cpuinfo_ia64 cpu_data[NR_CPUS];
+/*
+ * On NUMA systems, cpu_data for each cpu is allocated during cpu_init() & is allocated on
+ * the node that contains the cpu.  This minimizes off-node memory references.  cpu_data
+ * for each cpu contains an array of pointers to the cpu_data structures of each of the
+ * other cpus.
+ *
+ * On non-NUMA systems, cpu_data is a static array allocated at compile time.  References
+ * to the cpu_data of another cpu is done by direct references to the appropriate entry of
+ * the array.
+ */
+#ifdef CONFIG_NUMA
+# define cpu_data(cpu)		local_cpu_data->cpu_data_ptrs[cpu]
+#else
+  extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
+# define cpu_data(cpu)		(&_cpu_data[cpu])
+#endif
 
 extern void identify_cpu (struct cpuinfo_ia64 *);
 extern void print_cpu_info (struct cpuinfo_ia64 *);
@@ -314,20 +329,10 @@ struct siginfo;
 
 struct thread_struct {
 	__u64 ksp;			/* kernel stack pointer */
 	unsigned long flags;		/* various flags */
-	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
-	__u64 dbr[IA64_NUM_DBG_REGS];
-	__u64 ibr[IA64_NUM_DBG_REGS];
-#ifdef CONFIG_PERFMON
-	__u64 pmc[IA64_NUM_PMC_REGS];
-	__u64 pmd[IA64_NUM_PMD_REGS];
-	unsigned long pfm_pend_notify;	/* non-zero if we need to notify and block */
-	void *pfm_context;		/* pointer to detailed PMU context */
-# define INIT_THREAD_PM		{0, }, {0, }, 0, 0,
-#else
-# define INIT_THREAD_PM
-#endif
 	__u64 map_base;			/* base address for get_unmapped_area() */
 	__u64 task_size;		/* limit for task size */
+	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */
+
 #ifdef CONFIG_IA32_SUPPORT
 	__u64 eflag;			/* IA32 EFLAGS reg */
 	__u64 fsr;			/* IA32 floating pt status reg */
@@ -345,7 +350,18 @@ struct thread_struct {
 #else
 # define INIT_THREAD_IA32
 #endif /* CONFIG_IA32_SUPPORT */
-	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */
+#ifdef CONFIG_PERFMON
+	__u64 pmc[IA64_NUM_PMC_REGS];
+	__u64 pmd[IA64_NUM_PMD_REGS];
+	unsigned long pfm_pend_notify;	/* non-zero if we need to notify and block */
+	void *pfm_context;		/* pointer to detailed PMU context */
+# define INIT_THREAD_PM		{0, }, {0, }, 0, 0,
+#else
+# define INIT_THREAD_PM
+#endif
+	__u64 dbr[IA64_NUM_DBG_REGS];
+	__u64 ibr[IA64_NUM_DBG_REGS];
+	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
 };
 
 #define INIT_MMAP {								\
@@ -356,14 +372,14 @@ struct thread_struct {
 #define INIT_THREAD {					\
 	0,				/* ksp */	\
 	0,				/* flags */	\
-	{{{{0}}}, },			/* fph */	\
-	{0, },				/* dbr */	\
-	{0, },				/* ibr */	\
-	INIT_THREAD_PM					\
 	DEFAULT_MAP_BASE,		/* map_base */	\
 	DEFAULT_TASK_SIZE,		/* task_size */	\
+	0,				/* siginfo */	\
 	INIT_THREAD_IA32				\
-	0				/* siginfo */	\
+	INIT_THREAD_PM					\
+	{0, },				/* dbr */	\
+	{0, },				/* ibr */	\
+	{{{{0}}}, }			/* fph */	\
 }
 
 #define start_thread(regs,new_ip,new_sp) do {					\
@@ -416,7 +432,7 @@ struct task_struct;
 /*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exist status of the task via
- * wait().  This is a no-op on IA-64.
+ * wait().
 */
 #ifdef CONFIG_PERFMON
 extern void release_thread (struct task_struct *task);
@@ -513,8 +529,8 @@ extern void ia64_save_debug_regs (unsigned long *save_area);
 extern void ia64_load_debug_regs (unsigned long *save_area);
 
 #ifdef CONFIG_IA32_SUPPORT
-extern void ia32_save_state (struct thread_struct *thread);
-extern void ia32_load_state (struct thread_struct *thread);
+extern void ia32_save_state (struct task_struct *task);
+extern void ia32_load_state (struct task_struct *task);
 #endif
 
 #ifdef CONFIG_PERFMON
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 6e7417b453f1..5210d2de1df4 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -220,11 +220,16 @@ struct switch_stack {
   struct task_struct;			/* forward decl */
 
   extern void show_regs (struct pt_regs *);
-  extern unsigned long ia64_get_user_bsp (struct task_struct *, struct pt_regs *);
-  extern long ia64_peek (struct task_struct *, unsigned long, unsigned long, long *);
-  extern long ia64_poke (struct task_struct *, unsigned long, unsigned long, long);
+  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+					      unsigned long *);
+  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+			 unsigned long, long *);
+  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+			 unsigned long, long);
  extern void ia64_flush_fph (struct task_struct *);
  extern void ia64_sync_fph (struct task_struct *);
+  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+				  unsigned long, unsigned long);
 
 /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
   extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
diff --git a/include/asm-ia64/ptrace_offsets.h b/include/asm-ia64/ptrace_offsets.h
index 6fa6fb446f6b..44a76cdcc4e9 100644
--- a/include/asm-ia64/ptrace_offsets.h
+++ b/include/asm-ia64/ptrace_offsets.h
@@ -173,7 +173,7 @@
 #define PT_AR_BSPSTORE		0x0868
 #define PT_PR			0x0870
 #define PT_B6			0x0878
-#define PT_AR_BSP		0x0880
+#define PT_AR_BSP		0x0880	/* note: this points to the *end* of the backing store! */
 #define PT_R1			0x0888
 #define PT_R2			0x0890
 #define PT_R3			0x0898
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f24928e44c9a..64e652b2721b 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -7,8 +7,8 @@
 * This is based on version 2.5 of the manual "IA-64 System
 * Abstraction Layer".
 *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
 *
 * 99/09/29 davidm	Updated for SAL 2.6
diff --git a/include/asm-ia64/sigcontext.h b/include/asm-ia64/sigcontext.h
index 5abb275a9e9f..5ff4a2ff67b7 100644
--- a/include/asm-ia64/sigcontext.h
+++ b/include/asm-ia64/sigcontext.h
@@ -40,6 +40,8 @@ struct sigcontext {
 	unsigned long		sc_gr[32];	/* general registers (static partition) */
 	struct ia64_fpreg	sc_fr[128];	/* floating-point registers */
 
+	unsigned long		sc_rsvd[16];	/* reserved for future use */
+
 	/*
	 * The mask must come last so we can increase _NSIG_WORDS
	 * without breaking binary compatibility.
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index f6a01d64034d..45dd55e59b1c 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -56,7 +56,7 @@
 * SA_FLAGS values:
 *
 * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_INTERRUPT is a no-op, but left due to historical reasons.
 * SA_RESTART flag to get restarting signals (which were the default long ago)
 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
 * SA_RESETHAND clears the handler when the signal is delivered.
@@ -105,7 +105,6 @@
 #define SA_PROBE		SA_ONESHOT
 #define SA_SAMPLE_RANDOM	SA_RESTART
 #define SA_SHIRQ		0x04000000
-#define SA_LEGACY		0x02000000	/* installed via a legacy irq? */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index f9878b782f56..a4bab219f605 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -1,7 +1,7 @@
 /*
 * SMP Support
 *
- * Copyright (C) 1999 VA Linux Systems 
+ * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2001 Hewlett-Packard Co
 * Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
@@ -35,14 +35,13 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __initdata;
 
-extern unsigned long cpu_present_map;
-extern unsigned long cpu_online_map;
+extern volatile unsigned long cpu_online_map;
 extern unsigned long ipi_base_addr;
-extern int __cpu_physical_id[NR_CPUS];
 extern unsigned char smp_int_redirect;
 extern int smp_num_cpus;
 
-#define cpu_physical_id(i)	__cpu_physical_id[i]
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]
 
 #define cpu_number_map(i)	(i)
 #define cpu_logical_map(i)	(i)
@@ -70,7 +69,7 @@ cpu_logical_id (int cpuid)
 * max_xtp : never deliver interrupts to this CPU.
 */
 
-static inline void 
+static inline void
 min_xtp (void)
 {
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
@@ -85,13 +84,13 @@ normal_xtp (void)
 }
 
 static inline void
-max_xtp (void) 
+max_xtp (void)
 {
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		writeb(0x0f, ipi_base_addr | XTP_OFFSET); /* Set XTP to max */
 }
 
-static inline unsigned int 
+static inline unsigned int
 hard_smp_processor_id (void)
 {
	union {
diff --git a/include/asm-ia64/softirq.h b/include/asm-ia64/softirq.h
index 5d3c7ab2ae60..6ac2197344b6 100644
--- a/include/asm-ia64/softirq.h
+++ b/include/asm-ia64/softirq.h
@@ -7,8 +7,18 @@
 */
 #include <asm/hardirq.h>
 
+#define __local_bh_enable()	do { barrier(); local_bh_count()--; } while (0)
+
 #define local_bh_disable()	do { local_bh_count()++; barrier(); } while (0)
-#define local_bh_enable()	do { barrier(); local_bh_count()--; } while (0)
+#define local_bh_enable()								\
+do {											\
+	__local_bh_enable();								\
+	if (__builtin_expect(local_softirq_pending(), 0) && local_bh_count() == 0)	\
+		do_softirq();								\
+} while (0)
+
+
+#define __cpu_raise_softirq(cpu,nr)	set_bit((nr), &softirq_pending(cpu))
 
 #define in_softirq()		(local_bh_count() != 0)
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 70ff9bda4b44..3a6d2e0e7cf4 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -19,12 +19,12 @@
 
 #ifdef NEW_LOCK
 
-typedef struct { 
+typedef struct {
	volatile unsigned int lock;
 } spinlock_t;
 
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
-#define spin_lock_init(x)			((x)->lock = 0)
+#define spin_lock_init(x)			((x)->lock = 0)
 
 /*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
@@ -62,12 +62,12 @@ typedef struct {
 })
 
 #define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock(x)		do {((spinlock_t *) x)->lock = 0;} while (0)
-#define spin_unlock_wait(x)	do {} while ((x)->lock)
+#define spin_unlock(x)		do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
+#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
 
 #else /* !NEW_LOCK */
 
-typedef struct { 
+typedef struct {
	volatile unsigned int lock;
 } spinlock_t;
 
@@ -96,7 +96,7 @@ typedef struct {
	:: "r"(&(x)->lock) : "r2", "r29", "memory")
 
 #define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock(x)		do {((spinlock_t *) x)->lock = 0; barrier(); } while (0)
+#define spin_unlock(x)		do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h
index c17fc8de4280..5c89e5c5584d 100644
--- a/include/asm-ia64/string.h
+++ b/include/asm-ia64/string.h
@@ -10,7 +10,6 @@
 */
 
 #include <linux/config.h>	/* remove this once we remove the A-step workaround... */
-#ifndef CONFIG_ITANIUM_ASTEP_SPECIFIC
 
 #define __HAVE_ARCH_STRLEN	1 /* see arch/ia64/lib/strlen.S */
 #define __HAVE_ARCH_MEMSET	1 /* see arch/ia64/lib/memset.S */
@@ -21,6 +20,4 @@ extern __kernel_size_t strlen (const char *);
 extern void *memset (void *, int, __kernel_size_t);
 extern void *memcpy (void *, const void *, __kernel_size_t);
 
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
-
 #endif /* _ASM_IA64_STRING_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index cd46c860cf48..f44f3777de01 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -29,8 +29,7 @@
 #define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)
 #define PERCPU_ADDR		(0xa000000000000000 + 2*PAGE_SIZE)
 
-#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) \
-	|| defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC)
+#if defined(CONFIG_ITANIUM_B0_SPECIFIC) || defined(CONFIG_ITANIUM_B1_SPECIFIC)
 /* Workaround for Errata 97.  */
 # define IA64_SEMFIX_INSN	mf;
 # define IA64_SEMFIX	"mf;"
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
index c9d6dca0eca7..118676881435 100644
--- a/include/asm-ia64/unaligned.h
+++ b/include/asm-ia64/unaligned.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_IA64_UNALIGNED_H
 #define _ASM_IA64_UNALIGNED_H
 
+#include <linux/types.h>
+
 /*
 * The main single-value unaligned transfer routines.  Derived from
 * the Linux/Alpha version.
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index d2e3bdd3e28e..e2f8d278e727 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -204,6 +204,7 @@
 #define __NR_fstat			1212
 #define __NR_clone2			1213
 #define __NR_getdents64			1214
+#define __NR_getunwind			1215
 
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
index fa00da32af01..d92df4658237 100644
--- a/include/asm-ia64/unwind.h
+++ b/include/asm-ia64/unwind.h
@@ -94,9 +94,10 @@ struct unw_frame_info {
 * Initialize unwind support.
 */
 extern void unw_init (void);
+extern void unw_create_gate_table (void);
 
 extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
-				   void *table_start, void *table_end);
+				   const void *table_start, const void *table_end);
 
 extern void unw_remove_unwind_table (void *handle);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1c45c2cb3637..2b2c0bb1e7cb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -174,8 +174,6 @@ extern int * max_sectors[MAX_BLKDEV];
 
 extern int * max_segments[MAX_BLKDEV];
 
-extern atomic_t queued_sectors;
-
 #define MAX_SEGMENTS 128
 #define MAX_SECTORS 255
 
@@ -203,14 +201,7 @@ static inline int get_hardsect_size(kdev_t dev)
	return 512;
 }
 
-#define blk_finished_io(nsects)				\
-	atomic_sub(nsects, &queued_sectors);		\
-	if (atomic_read(&queued_sectors) < 0) {		\
-		printk("block: queued_sectors < 0\n");	\
-		atomic_set(&queued_sectors, 0);		\
-	}
-
-#define blk_started_io(nsects)				\
-	atomic_add(nsects, &queued_sectors);
+#define blk_finished_io(nsects)	do { } while (0)
+#define blk_started_io(nsects)	do { } while (0)
 
 #endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index df33ef8c6a8d..f11118fac4d3 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -171,11 +171,11 @@ extern int d_invalidate(struct dentry *);
 #define shrink_dcache() prune_dcache(0)
 struct zone_struct;
 /* dcache memory management */
-extern void shrink_dcache_memory(int, unsigned int);
+extern int shrink_dcache_memory(int, unsigned int);
 extern void prune_dcache(int);
 
 /* icache memory management (defined in linux/fs/inode.c) */
-extern void shrink_icache_memory(int, int);
+extern int shrink_icache_memory(int, int);
 extern void prune_icache(int);
 
 /* only used at mount-time */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index bcb9a1752d0c..415a2dbec3ec 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1652,7 +1652,6 @@ int reiserfs_convert_objectid_map_v1(struct super_block *) ;
 
 /* stree.c */
 int B_IS_IN_TREE(struct buffer_head *);
-extern inline void copy_key (void * to, void * from);
 extern inline void copy_short_key (void * to, void * from);
 extern inline void copy_item_head(void * p_v_to, void * p_v_from);
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index d360c7533d9f..1857eb928eaf 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -109,6 +109,8 @@
 *	vector. Each device should include this file and redefine these names
 *	as their own. Because these are device dependent it is a good idea
 *	_NOT_ to issue them to random objects and hope.
+ *
+ *	THESE IOCTLS ARE _DEPRECATED_ AND WILL DISAPPEAR IN 2.5.X -DaveM
 */
 
 #define SIOCDEVPRIVATE	0x89F0	/* to 89FF */
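The bitops.h hunks above add documented non-atomic variants (`__set_bit()`, `__test_and_set_bit()`, and friends) alongside the existing cmpxchg-based atomic operations, all sharing the same 32-bit word indexing (`nr >> 5` selects the word, `nr & 31` the bit). A minimal user-space sketch of that non-atomic semantics may help make the indexing concrete; the `sketch_*` names are hypothetical stand-ins for illustration only, not the kernel code itself:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's __set_bit(): plain read-modify-write, no atomicity. */
static void sketch_set_bit(int nr, void *addr)
{
	/* word nr >> 5 holds the bit; the bit's position in it is nr & 31 */
	*((uint32_t *) addr + (nr >> 5)) |= 1u << (nr & 31);
}

/* Mirrors the patch's __test_and_set_bit(): returns the bit's old value. */
static int sketch_test_and_set_bit(int nr, void *addr)
{
	uint32_t *p = (uint32_t *) addr + (nr >> 5);
	uint32_t m = 1u << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

int main(void)
{
	uint32_t bitmap[4] = { 0 };

	sketch_set_bit(33, bitmap);				/* sets bit 1 of bitmap[1] */
	printf("%d\n", sketch_test_and_set_bit(33, bitmap));	/* prints 1: already set */
	printf("%d\n", sketch_test_and_set_bit(7, bitmap));	/* prints 0: was clear */
	return 0;
}
```

As the new kernel-doc comments in the hunk stress, these `__`-prefixed variants trade the cmpxchg loop of `set_bit()` for a plain read-modify-write, so callers must serialize concurrent access to the same word themselves.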
