| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@home.transmeta.com> | 2002-08-12 07:26:30 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-08-12 07:26:30 -0700 |
| commit | 177b088842397486d9207c838de5b68369c2735b (patch) | |
| tree | 08c6dc5bd8c1e34c7319b3fd042500b9b69a620a /include | |
| parent | 0fc27e84c3e8209c202de84c145148659ff5b1bc (diff) | |
| parent | c78a7e908d9360e914d53bc46683d0bd70f6b560 (diff) | |
Merge http://lia64.bkbits.net/to-linus-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
Diffstat (limited to 'include')
34 files changed, 510 insertions, 341 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 663a568d9767..8e39b4fe8691 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -30,11 +30,74 @@
 #ifdef __KERNEL__
 
-#define __acpi_map_table(phys_addr, size) __va(phys_addr)
+#define COMPILER_DEPENDENT_INT64	long
+#define COMPILER_DEPENDENT_UINT64	unsigned long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE		- Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE		- External ACPI interfaces
+ * ACPI_INTERNAL_XFACE		- Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE	- Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS()	local_irq_disable()
+#define ACPI_ENABLE_IRQS()	local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE()
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
+	do { \
+		__asm__ volatile ("1:	ld4	r29=%1\n" \
+			";;\n" \
+			"mov	ar.ccv=r29\n" \
+			"mov	r2=r29\n" \
+			"shr.u	r30=r29,1\n" \
+			"and	r29=-4,r29\n" \
+			";;\n" \
+			"add	r29=2,r29\n" \
+			"and	r30=1,r30\n" \
+			";;\n" \
+			"add	r29=r29,r30\n" \
+			";;\n" \
+			"cmpxchg4.acq	r30=%1,r29,ar.ccv\n" \
+			";;\n" \
+			"cmp.eq	p6,p7=r2,r30\n" \
+			"(p7)	br.dpnt.few	1b\n" \
+			"cmp.gt	p8,p9=3,r29\n" \
+			";;\n" \
+			"(p8)	mov	%0=-1\n" \
+			"(p9)	mov	%0=r0\n" \
+			:"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \
+	} while (0)
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
+	do { \
+		__asm__ volatile ("1:	ld4	r29=%1\n" \
+			";;\n" \
+			"mov	ar.ccv=r29\n" \
+			"mov	r2=r29\n" \
+			"and	r29=-4,r29\n" \
+			";;\n" \
+			"cmpxchg4.acq	r30=%1,r29,ar.ccv\n" \
+			";;\n" \
+			"cmp.eq	p6,p7=r2,r30\n" \
+			"(p7)	br.dpnt.few	1b\n" \
+			"and	%0=1,r2\n" \
+			";;\n" \
+			:"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \
+	} while (0)
 
 const char *acpi_get_sysname (void);
 int acpi_boot_init (char *cdline);
-int acpi_find_rsdp (unsigned long *phys_addr);
 int acpi_request_vector (u32 int_type);
 int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
 int acpi_get_interrupt_model(int *type);
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
index ba05bdf9a211..97dc1b9baed5 100644
--- a/include/asm-ia64/agp.h
+++ b/include/asm-ia64/agp.h
@@ -1,11 +1,21 @@
-#ifndef AGP_H
-#define AGP_H 1
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
 
-/* dummy for now */
+/*
+ * IA-64 specific AGP definitions.
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
 
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
-#define flush_agp_mappings()
-#define flush_agp_cache() mb()
+/*
+ * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+ * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+ * (unlike x86, where it gets mapped "write-coalescing").
+ */
+#define map_page_into_agp(page)		/* nothing */
+#define unmap_page_from_agp(page)	/* nothing */
+#define flush_agp_mappings()		/* nothing */
+#define flush_agp_cache()		mb()
 
-#endif
+#endif /* _ASM_IA64_AGP_H */
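The `ACPI_ACQUIRE_GLOBAL_LOCK()` assembly added above implements the ACPI-specified global-lock handshake: bit 0 of the lock word means "pending", bit 1 means "owned". The same protocol can be sketched in C, with a GCC atomic builtin standing in for `cmpxchg4.acq` (an illustration only, not the kernel's code):

```c
/* Sketch of the ACPI global-lock acquire protocol (bit 0 = pending,
 * bit 1 = owned); mirrors the ia64 assembly above.  Illustration only. */
static int acpi_acquire_global_lock_sketch(volatile unsigned int *lock)
{
	unsigned int old, new;

	do {
		old = *lock;
		/* Set the owned bit; if it was already owned, also mark us pending. */
		new = ((old & ~3u) + 2u) + ((old >> 1) & 1u);
	} while (!__sync_bool_compare_and_swap(lock, old, new));

	/* Acquired outright iff we did not have to set the pending bit. */
	return new < 3u;
}
```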
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 63c824e17768..9ec001ebd341 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -326,7 +326,7 @@ ia64_fls (unsigned long x)
 	return exp - 0xffff;
 }
 
-static int
+static inline int
 fls (int x)
 {
 	return ia64_fls((unsigned int) x);
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
index 025398be93bc..51c4780d875a 100644
--- a/include/asm-ia64/cacheflush.h
+++ b/include/asm-ia64/cacheflush.h
@@ -6,6 +6,8 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/page-flags.h>
+
 #include <asm/bitops.h>
 #include <asm/page.h>
 
@@ -23,7 +25,7 @@
 #define flush_dcache_page(page)			\
 do {						\
-	clear_bit(PG_arch_1, &page->flags);	\
+	clear_bit(PG_arch_1, &(page)->flags);	\
 } while (0)
 
 extern void flush_icache_range (unsigned long start, unsigned long end);
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
index 5c283e8cd977..da812415f634 100644
--- a/include/asm-ia64/delay.h
+++ b/include/asm-ia64/delay.h
@@ -53,7 +53,7 @@ ia64_get_itc (void)
 	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #ifdef CONFIG_ITANIUM
-	while (unlikely ((__s32) result == -1)
+	while (unlikely((__s32) result == -1))
 		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #endif
 	return result;
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index ea1e362062da..3be1b4925e18 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -2,8 +2,8 @@
 #define _ASM_IA64_DMA_H
 
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #include <linux/config.h>
@@ -18,4 +18,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 # define isa_dma_bridge_buggy	(0)
 #endif
 
+#define free_dma(x)
+
 #endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h
index be93c080640a..de403ab78367 100644
--- a/include/asm-ia64/efi.h
+++ b/include/asm-ia64/efi.h
@@ -190,6 +190,9 @@ typedef void efi_reset_system_t (int reset_type, efi_status_t status,
 #define SAL_SYSTEM_TABLE_GUID \
     EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
 
+#define HCDP_TABLE_GUID \
+    EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
+
 typedef struct {
 	efi_guid_t guid;
 	u64 table;
@@ -225,6 +228,7 @@ extern struct efi {
 	void *smbios;			/* SM BIOS table */
 	void *sal_systab;		/* SAL system table */
 	void *boot_info;		/* boot info table */
+	void *hcdp;			/* HCDP table */
 	efi_get_time_t *get_time;
 	efi_set_time_t *set_time;
 	efi_get_wakeup_time_t *get_wakeup_time;
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index 05511152aa22..a0904abbd014 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -2,7 +2,7 @@
 #define _ASM_IA64_ELF_H
 
 /*
- * ELF archtecture specific definitions.
+ * ELF-specific definitions.
  *
  * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
index f5ad8445d75c..95c2cc97b8e3 100644
--- a/include/asm-ia64/hardirq.h
+++ b/include/asm-ia64/hardirq.h
@@ -17,89 +17,93 @@
  * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
  */
 #define softirq_pending(cpu)		(cpu_data(cpu)->softirq_pending)
-#define ksoftirqd_task(cpu)		(cpu_data(cpu)->ksoftirqd)
-#define irq_count(cpu)			(cpu_data(cpu)->irq_stat.f.irq_count)
-#define bh_count(cpu)			(cpu_data(cpu)->irq_stat.f.bh_count)
 #define syscall_count(cpu)		/* unused on IA-64 */
+#define ksoftirqd_task(cpu)		(cpu_data(cpu)->ksoftirqd)
 #define nmi_count(cpu)			0
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
-#define local_ksoftirqd_task()		(local_cpu_data->ksoftirqd)
-#define really_local_irq_count()	(local_cpu_data->irq_stat.f.irq_count)	/* XXX fix me */
-#define really_local_bh_count()		(local_cpu_data->irq_stat.f.bh_count)	/* XXX fix me */
 #define local_syscall_count()		/* unused on IA-64 */
+#define local_ksoftirqd_task()		(local_cpu_data->ksoftirqd)
 #define local_nmi_count()		0
 
 /*
- * Are we in an interrupt context? Either doing bottom half or hardware interrupt
- * processing?
+ * We put the hardirq and softirq counter into the preemption counter.  The bitmask has the
+ * following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
+ *
+ * - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0xffff0000
  */
-#define in_interrupt()			(local_cpu_data->irq_stat.irq_and_bh_counts != 0)
-#define in_irq()			(local_cpu_data->irq_stat.f.irq_count != 0)
 
-#ifndef CONFIG_SMP
-# define local_hardirq_trylock()	(really_local_irq_count() == 0)
-# define local_hardirq_endlock()	do { } while (0)
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	16
 
-# define local_irq_enter(irq)		(really_local_irq_count()++)
-# define local_irq_exit(irq)		(really_local_irq_count()--)
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 
-# define synchronize_irq()		barrier()
-#else
+#define __MASK(x)	((1UL << (x))-1)
 
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have space for potentially all IRQ sources
+ * in the system nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context?
+ * Interrupt context?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
 
-#include <asm/atomic.h>
-#include <asm/smp.h>
-
-extern unsigned int global_irq_holder;
-extern volatile unsigned long global_irq_lock;
-
-static inline int
-irqs_running (void)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (irq_count(i))
-			return 1;
-	return 0;
-}
-
-static inline void
-release_irqlock (int cpu)
-{
-	/* if we didn't own the irq lock, just ignore.. */
-	if (global_irq_holder == cpu) {
-		global_irq_holder = NO_PROC_ID;
-		smp_mb__before_clear_bit();	/* need barrier before releasing lock... */
-		clear_bit(0,&global_irq_lock);
-	}
-}
-
-static inline void
-local_irq_enter (int irq)
-{
-	really_local_irq_count()++;
-
-	while (test_bit(0,&global_irq_lock)) {
-		/* nothing */;
-	}
-}
-
-static inline void
-local_irq_exit (int irq)
-{
-	really_local_irq_count()--;
-}
-
-static inline int
-local_hardirq_trylock (void)
-{
-	return !really_local_irq_count() && !test_bit(0,&global_irq_lock);
-}
-
-#define local_hardirq_endlock()		do { } while (0)
-
-extern void synchronize_irq (void);
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
+
+#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# error CONFIG_PREEMT currently not supported.
+# define IRQ_EXIT_OFFSET	(HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET	HARDIRQ_OFFSET
+#endif
+
+#define irq_exit()							\
+do {									\
+	preempt_count() -= IRQ_EXIT_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		do_softirq();						\
+	preempt_enable_no_resched();					\
+} while (0)
+
+#ifdef CONFIG_SMP
+  extern void synchronize_irq (unsigned int irq);
+#else
+# define synchronize_irq(irq)	barrier()
 #endif /* CONFIG_SMP */
+
 #endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 823d3e8ce20f..f723d5eedf62 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -2,10 +2,11 @@
 #define _ASM_IA64_HW_IRQ_H
 
 /*
- * Copyright (C) 2001 Hewlett-Packard Co
- * Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 
@@ -67,6 +68,8 @@ enum {
 
 extern __u8 isa_irq_to_vector_map[16];
 #define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]
+extern __u8 gsi_to_vector_map[255];
+#define gsi_to_vector(x)	gsi_to_vector_map[(x)]
 
 extern unsigned long ipi_base_addr;
diff --git a/include/asm-ia64/keyboard.h b/include/asm-ia64/keyboard.h
index cfb781aae625..b2921543421f 100644
--- a/include/asm-ia64/keyboard.h
+++ b/include/asm-ia64/keyboard.h
@@ -16,6 +16,7 @@
 #define KEYBOARD_IRQ			isa_irq_to_vector(1)
 #define DISABLE_KBD_DURING_INTERRUPTS	0
 
+extern unsigned char acpi_kbd_controller_present;
 extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
 extern int pckbd_getkeycode(unsigned int scancode);
 extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
@@ -26,6 +27,7 @@ extern void pckbd_leds(unsigned char leds);
 extern void pckbd_init_hw(void);
 extern unsigned char pckbd_sysrq_xlate[128];
 
+#define kbd_controller_present()	acpi_kbd_controller_present
 #define kbd_setkeycode		pckbd_setkeycode
 #define kbd_getkeycode		pckbd_getkeycode
 #define kbd_pretranslate	pckbd_pretranslate
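The new hardirq.h layout above packs preemption, softirq, and hardirq depth into a single preempt_count word. A small stand-alone sketch using the same constants shows how one such value decomposes (illustrative only, not kernel code):

```c
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	16

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __MASK(x)	((1UL << (x)) - 1)
#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

int main(void)
{
	/* e.g. one hardirq, one softirq, preemption disabled once */
	unsigned long count = (1UL << HARDIRQ_SHIFT) | (1UL << SOFTIRQ_SHIFT) | 1UL;

	printf("preempt depth: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
	printf("softirq depth: %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("hardirq depth: %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
	return 0;
}
```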
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 66b7ea301de9..173777837e55 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -64,6 +64,15 @@
 #define IA64_PSR_RI_BIT		41
 #define IA64_PSR_ED_BIT		43
 #define IA64_PSR_BN_BIT		44
+#define IA64_PSR_IA_BIT		45
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
+   execve(). */
+#define IA64_PSR_BITS_TO_CLEAR	(IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+				 IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
+				 IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET	(IA64_PSR_DFH)
 
 #define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
 #define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
@@ -85,6 +94,7 @@
 #define IA64_PSR_TB	(__IA64_UL(1) << IA64_PSR_TB_BIT)
 #define IA64_PSR_RT	(__IA64_UL(1) << IA64_PSR_RT_BIT)
 /* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL	(__IA64_UL(3) << IA64_PSR_CPL0_BIT)
 #define IA64_PSR_IS	(__IA64_UL(1) << IA64_PSR_IS_BIT)
 #define IA64_PSR_MC	(__IA64_UL(1) << IA64_PSR_MC_BIT)
 #define IA64_PSR_IT	(__IA64_UL(1) << IA64_PSR_IT_BIT)
@@ -95,6 +105,7 @@
 #define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)
 #define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
 #define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
 
 /* User mask bits: */
 #define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ae5390ccc726..82500a98d1be 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -210,6 +210,7 @@ extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
 extern ia64_mv_pci_dma_sync_single	swiotlb_sync_single;
 extern ia64_mv_pci_dma_sync_sg		swiotlb_sync_sg;
 extern ia64_mv_pci_dma_address		swiotlb_dma_address;
+extern ia64_mv_pci_dma_supported	swiotlb_pci_dma_supported;
 
 /*
  * Define default versions so we can extend machvec for new platforms without having
diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h
index 71912c19c2d4..609b61eb44bd 100644
--- a/include/asm-ia64/machvec_init.h
+++ b/include/asm-ia64/machvec_init.h
@@ -16,6 +16,7 @@ extern ia64_mv_inl_t __ia64_inl;
 extern ia64_mv_outb_t __ia64_outb;
 extern ia64_mv_outw_t __ia64_outw;
 extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiob_t __ia64_mmiob;
 
 #define MACHVEC_HELPER(name)									\
  struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 1a0024cfc3f3..dde8b9702d44 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -2,8 +2,8 @@
 #define _ASM_IA64_MMU_CONTEXT_H
 
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 /*
@@ -13,8 +13,6 @@
  * consider the region number when performing a TLB lookup, we need to assign a unique
  * region id to each region in a process.  We use the least significant three bits in a
  * region id for this purpose.
- *
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -23,6 +21,8 @@
 
 # ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 
@@ -36,6 +36,7 @@ struct ia64_ctx {
 };
 
 extern struct ia64_ctx ia64_ctx;
+extern u8 ia64_need_tlb_flush __per_cpu_data;
 
 extern void wrap_mmu_context (struct mm_struct *mm);
 
@@ -44,6 +45,23 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }
 
+/*
+ * When the context counter wraps around all TLBs need to be flushed because an old
+ * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
+ * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
+ * <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void __flush_tlb_all (void);
+
+	if (unlikely(ia64_need_tlb_flush)) {
+		__flush_tlb_all();
+		ia64_need_tlb_flush = 0;
+	}
+}
+
 static inline void
 get_new_mmu_context (struct mm_struct *mm)
 {
@@ -54,7 +72,6 @@ get_new_mmu_context (struct mm_struct *mm)
 		mm->context = ia64_ctx.next++;
 	}
 	spin_unlock(&ia64_ctx.lock);
-
 }
 
 static inline void
@@ -109,6 +126,8 @@ reload_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
+	delayed_tlb_flush();
+
 	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h
index 0c64400aceb7..beab8670ffa7 100644
--- a/include/asm-ia64/module.h
+++ b/include/asm-ia64/module.h
@@ -75,7 +75,7 @@ ia64_module_init (struct module *mod)
 		return 1;
 	}
 	if (!mod_bound(archdata->segment_base, 0, mod)) {
-		printk(KERN_ERR "module_arch_init: archdata->unw_table out of bounds.\n");
+		printk(KERN_ERR "module_arch_init: archdata->segment_base out of bounds.\n");
 		return 1;
 	}
diff --git a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h
deleted file mode 100644
index ae6724ba9f2c..000000000000
--- a/include/asm-ia64/offsets.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef _ASM_IA64_OFFSETS_H
-#define _ASM_IA64_OFFSETS_H
-/*
- * DO NOT MODIFY
- *
- * This file was generated by arch/ia64/tools/print_offsets.awk.
- *
- */
-#define IA64_TASK_SIZE			3952	/* 0xf70 */
-#define IA64_THREAD_INFO_SIZE		32	/* 0x20 */
-#define IA64_PT_REGS_SIZE		400	/* 0x190 */
-#define IA64_SWITCH_STACK_SIZE		560	/* 0x230 */
-#define IA64_SIGINFO_SIZE		128	/* 0x80 */
-#define IA64_CPU_SIZE			224	/* 0xe0 */
-#define SIGFRAME_SIZE			2816	/* 0xb00 */
-#define UNW_FRAME_INFO_SIZE		448	/* 0x1c0 */
-
-#define IA64_TASK_THREAD_KSP_OFFSET	1496	/* 0x5d8 */
-#define IA64_PT_REGS_CR_IPSR_OFFSET	0	/* 0x0 */
-#define IA64_PT_REGS_CR_IIP_OFFSET	8	/* 0x8 */
-#define IA64_PT_REGS_CR_IFS_OFFSET	16	/* 0x10 */
-#define IA64_PT_REGS_AR_UNAT_OFFSET	24	/* 0x18 */
-#define IA64_PT_REGS_AR_PFS_OFFSET	32	/* 0x20 */
-#define IA64_PT_REGS_AR_RSC_OFFSET	40	/* 0x28 */
-#define IA64_PT_REGS_AR_RNAT_OFFSET	48	/* 0x30 */
-#define IA64_PT_REGS_AR_BSPSTORE_OFFSET	56	/* 0x38 */
-#define IA64_PT_REGS_PR_OFFSET		64	/* 0x40 */
-#define IA64_PT_REGS_B6_OFFSET		72	/* 0x48 */
-#define IA64_PT_REGS_LOADRS_OFFSET	80	/* 0x50 */
-#define IA64_PT_REGS_R1_OFFSET		88	/* 0x58 */
-#define IA64_PT_REGS_R2_OFFSET		96	/* 0x60 */
-#define IA64_PT_REGS_R3_OFFSET		104	/* 0x68 */
-#define IA64_PT_REGS_R12_OFFSET		112	/* 0x70 */
-#define IA64_PT_REGS_R13_OFFSET		120	/* 0x78 */
-#define IA64_PT_REGS_R14_OFFSET		128	/* 0x80 */
-#define IA64_PT_REGS_R15_OFFSET		136	/* 0x88 */
-#define IA64_PT_REGS_R8_OFFSET		144	/* 0x90 */
-#define IA64_PT_REGS_R9_OFFSET		152	/* 0x98 */
-#define IA64_PT_REGS_R10_OFFSET		160	/* 0xa0 */
-#define IA64_PT_REGS_R11_OFFSET		168	/* 0xa8 */
-#define IA64_PT_REGS_R16_OFFSET		176	/* 0xb0 */
-#define IA64_PT_REGS_R17_OFFSET		184	/* 0xb8 */
-#define IA64_PT_REGS_R18_OFFSET		192	/* 0xc0 */
-#define IA64_PT_REGS_R19_OFFSET		200	/* 0xc8 */
-#define IA64_PT_REGS_R20_OFFSET		208	/* 0xd0 */
-#define IA64_PT_REGS_R21_OFFSET		216	/* 0xd8 */
-#define IA64_PT_REGS_R22_OFFSET		224	/* 0xe0 */
-#define IA64_PT_REGS_R23_OFFSET		232	/* 0xe8 */
-#define IA64_PT_REGS_R24_OFFSET		240	/* 0xf0 */
-#define IA64_PT_REGS_R25_OFFSET		248	/* 0xf8 */
-#define IA64_PT_REGS_R26_OFFSET		256	/* 0x100 */
-#define IA64_PT_REGS_R27_OFFSET		264	/* 0x108 */
-#define IA64_PT_REGS_R28_OFFSET		272	/* 0x110 */
-#define IA64_PT_REGS_R29_OFFSET		280	/* 0x118 */
-#define IA64_PT_REGS_R30_OFFSET		288	/* 0x120 */
-#define IA64_PT_REGS_R31_OFFSET		296	/* 0x128 */
-#define IA64_PT_REGS_AR_CCV_OFFSET	304	/* 0x130 */
-#define IA64_PT_REGS_AR_FPSR_OFFSET	312	/* 0x138 */
-#define IA64_PT_REGS_B0_OFFSET		320	/* 0x140 */
-#define IA64_PT_REGS_B7_OFFSET		328	/* 0x148 */
-#define IA64_PT_REGS_F6_OFFSET		336	/* 0x150 */
-#define IA64_PT_REGS_F7_OFFSET		352	/* 0x160 */
-#define IA64_PT_REGS_F8_OFFSET		368	/* 0x170 */
-#define IA64_PT_REGS_F9_OFFSET		384	/* 0x180 */
-#define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET	0	/* 0x0 */
-#define IA64_SWITCH_STACK_AR_FPSR_OFFSET	8	/* 0x8 */
-#define IA64_SWITCH_STACK_F2_OFFSET	16	/* 0x10 */
-#define IA64_SWITCH_STACK_F3_OFFSET	32	/* 0x20 */
-#define IA64_SWITCH_STACK_F4_OFFSET	48	/* 0x30 */
-#define IA64_SWITCH_STACK_F5_OFFSET	64	/* 0x40 */
-#define IA64_SWITCH_STACK_F10_OFFSET	80	/* 0x50 */
-#define IA64_SWITCH_STACK_F11_OFFSET	96	/* 0x60 */
-#define IA64_SWITCH_STACK_F12_OFFSET	112	/* 0x70 */
-#define IA64_SWITCH_STACK_F13_OFFSET	128	/* 0x80 */
-#define IA64_SWITCH_STACK_F14_OFFSET	144	/* 0x90 */
-#define IA64_SWITCH_STACK_F15_OFFSET	160	/* 0xa0 */
-#define IA64_SWITCH_STACK_F16_OFFSET	176	/* 0xb0 */
-#define IA64_SWITCH_STACK_F17_OFFSET	192	/* 0xc0 */
-#define IA64_SWITCH_STACK_F18_OFFSET	208	/* 0xd0 */
-#define IA64_SWITCH_STACK_F19_OFFSET	224	/* 0xe0 */
-#define IA64_SWITCH_STACK_F20_OFFSET	240	/* 0xf0 */
-#define IA64_SWITCH_STACK_F21_OFFSET	256	/* 0x100 */
-#define IA64_SWITCH_STACK_F22_OFFSET	272	/* 0x110 */
-#define IA64_SWITCH_STACK_F23_OFFSET	288	/* 0x120 */
-#define IA64_SWITCH_STACK_F24_OFFSET	304	/* 0x130 */
-#define IA64_SWITCH_STACK_F25_OFFSET	320	/* 0x140 */
-#define IA64_SWITCH_STACK_F26_OFFSET	336	/* 0x150 */
-#define IA64_SWITCH_STACK_F27_OFFSET	352	/* 0x160 */
-#define IA64_SWITCH_STACK_F28_OFFSET	368	/* 0x170 */
-#define IA64_SWITCH_STACK_F29_OFFSET	384	/* 0x180 */
-#define IA64_SWITCH_STACK_F30_OFFSET	400	/* 0x190 */
-#define IA64_SWITCH_STACK_F31_OFFSET	416	/* 0x1a0 */
-#define IA64_SWITCH_STACK_R4_OFFSET	432	/* 0x1b0 */
-#define IA64_SWITCH_STACK_R5_OFFSET	440	/* 0x1b8 */
-#define IA64_SWITCH_STACK_R6_OFFSET	448	/* 0x1c0 */
-#define IA64_SWITCH_STACK_R7_OFFSET	456	/* 0x1c8 */
-#define IA64_SWITCH_STACK_B0_OFFSET	464	/* 0x1d0 */
-#define IA64_SWITCH_STACK_B1_OFFSET	472	/* 0x1d8 */
-#define IA64_SWITCH_STACK_B2_OFFSET	480	/* 0x1e0 */
-#define IA64_SWITCH_STACK_B3_OFFSET	488	/* 0x1e8 */
-#define IA64_SWITCH_STACK_B4_OFFSET	496	/* 0x1f0 */
-#define IA64_SWITCH_STACK_B5_OFFSET	504	/* 0x1f8 */
-#define IA64_SWITCH_STACK_AR_PFS_OFFSET	512	/* 0x200 */
-#define IA64_SWITCH_STACK_AR_LC_OFFSET	520	/* 0x208 */
-#define IA64_SWITCH_STACK_AR_UNAT_OFFSET	528	/* 0x210 */
-#define IA64_SWITCH_STACK_AR_RNAT_OFFSET	536	/* 0x218 */
-#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET	544	/* 0x220 */
-#define IA64_SWITCH_STACK_PR_OFFSET	552	/* 0x228 */
-#define IA64_SIGCONTEXT_IP_OFFSET	40	/* 0x28 */
-#define IA64_SIGCONTEXT_AR_BSP_OFFSET	72	/* 0x48 */
-#define IA64_SIGCONTEXT_AR_FPSR_OFFSET	104	/* 0x68 */
-#define IA64_SIGCONTEXT_AR_RNAT_OFFSET	80	/* 0x50 */
-#define IA64_SIGCONTEXT_AR_UNAT_OFFSET	96	/* 0x60 */
-#define IA64_SIGCONTEXT_B0_OFFSET	136	/* 0x88 */
-#define IA64_SIGCONTEXT_CFM_OFFSET	48	/* 0x30 */
-#define IA64_SIGCONTEXT_FLAGS_OFFSET	0	/* 0x0 */
-#define IA64_SIGCONTEXT_FR6_OFFSET	560	/* 0x230 */
-#define IA64_SIGCONTEXT_PR_OFFSET	128	/* 0x80 */
-#define IA64_SIGCONTEXT_R12_OFFSET	296	/* 0x128 */
-#define IA64_SIGCONTEXT_RBS_BASE_OFFSET	2512	/* 0x9d0 */
-#define IA64_SIGCONTEXT_LOADRS_OFFSET	2520	/* 0x9d8 */
-#define IA64_SIGFRAME_ARG0_OFFSET	0	/* 0x0 */
-#define IA64_SIGFRAME_ARG1_OFFSET	8	/* 0x8 */
-#define IA64_SIGFRAME_ARG2_OFFSET	16	/* 0x10 */
-#define IA64_SIGFRAME_HANDLER_OFFSET	24	/* 0x18 */
-#define IA64_SIGFRAME_SIGCONTEXT_OFFSET	160	/* 0xa0 */
-#define IA64_CLONE_VFORK		16384	/* 0x4000 */
-#define IA64_CLONE_VM			256	/* 0x100 */
-
-#endif /* _ASM_IA64_OFFSETS_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index c50cf5ec238e..27d1c9e59479 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -87,7 +87,12 @@ typedef union ia64_va {
 #define REGION_SIZE		REGION_NUMBER(1)
 #define REGION_KERNEL		7
 
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+# define ia64_abort()	__builtin_trap()
+#else
+# define ia64_abort()	(*(volatile int *) 0 = 0)
+#endif
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
 #define PAGE_BUG(page) do { BUG(); } while (0)
 
 static __inline__ int
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
index dc82a64fc40e..eaee5da9a787 100644
--- a/include/asm-ia64/param.h
+++ b/include/asm-ia64/param.h
@@ -4,8 +4,8 @@
 /*
  * Fundamental kernel parameters.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #include <linux/config.h>
@@ -33,6 +33,7 @@
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
 
 #ifdef __KERNEL__
+# define USER_HZ	HZ
 # define CLOCKS_PER_SEC	HZ	/* frequency at which times() counts */
 #endif
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index a5071096e68e..7ec7f24575e1 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -21,7 +21,7 @@
 #define PCIBIOS_MIN_MEM		0x10000000
 
 void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int seg, int bus);
+struct pci_bus * pcibios_scan_root(int bus);
 
 extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
 extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
@@ -90,7 +90,7 @@ pcibios_penalize_isa_irq (int irq)
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_controller_num(PDEV)	(0)
 
-#define sg_dma_len(sg)		((sg)->length)
+#define sg_dma_len(sg)		((sg)->dma_length)
 
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
index 86aaa50ab459..2b95dc27e73e 100644
--- a/include/asm-ia64/perfmon.h
+++ b/include/asm-ia64/perfmon.h
@@ -172,9 +172,8 @@ extern int pfm_use_debug_registers(struct task_struct *);
 extern int pfm_release_debug_registers(struct task_struct *);
 extern int pfm_cleanup_smpl_buf(struct task_struct *);
 extern void pfm_syst_wide_update_task(struct task_struct *, int);
-extern void pfm_ovfl_block_reset (void);
-
-extern int pfm_syst_wide;
+extern void pfm_ovfl_block_reset(void);
+extern void perfmon_init_percpu(void);
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index c919520f9bcc..493406169337 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -15,9 +15,10 @@
 
 #include <linux/config.h>
 
+#include <linux/compiler.h>
 #include <linux/mm.h>
+#include <linux/page-flags.h>
 #include <linux/threads.h>
-#include <linux/compiler.h>
 
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index a08560e55f44..51993a853ac5 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -223,6 +223,7 @@ struct thread_struct {
 	__u64 map_base;			/* base address for get_unmapped_area() */
 	__u64 task_size;		/* limit for task size */
 	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */
+	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */
 
 #ifdef CONFIG_IA32_SUPPORT
 	__u64 eflag;			/* IA32 EFLAGS reg */
@@ -270,12 +271,8 @@ struct thread_struct {
 #define start_thread(regs,new_ip,new_sp) do {							\
 	set_fs(USER_DS);									\
-	ia64_psr(regs)->dfh = 1;	/* disable fph */					\
-	ia64_psr(regs)->mfh = 0;	/* clear mfh */						\
-	ia64_psr(regs)->cpl = 3;	/* set user mode */					\
-	ia64_psr(regs)->ri = 0;		/* clear return slot number */				\
-	ia64_psr(regs)->is = 0;		/* IA-64 instruction set */				\
-	ia64_psr(regs)->sp = 1;		/* enforce secure perfmon */				\
+	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP))	\
+			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
 	regs->cr_iip = new_ip;									\
 	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
 	regs->ar_rnat = 0;									\
@@ -284,7 +281,7 @@ struct thread_struct {
 	regs->loadrs = 0;									\
 	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
 	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
-	if (!likely (current->mm->dumpable)) {							\
+	if (unlikely(!current->mm->dumpable)) {							\
 		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
@@ -393,8 +390,6 @@ ia64_set_kr (unsigned long regnum, unsigned long r)
 	}
 }
 
-#ifndef CONFIG_SMP
-
 static inline struct task_struct *
 ia64_get_fpu_owner (void)
 {
@@ -407,8 +402,6 @@ ia64_set_fpu_owner (struct task_struct *t)
 	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) t);
 }
 
-#endif /* !CONFIG_SMP */
-
 extern void __ia64_init_fpu (void);
 extern void __ia64_save_fpu (struct ia64_fpreg *fph);
 extern void __ia64_load_fpu (struct ia64_fpreg *fph);
diff --git a/include/asm-ia64/rmap.h b/include/asm-ia64/rmap.h
index 6738fe9e228f..179c565dd7d6 100644
--- a/include/asm-ia64/rmap.h
+++ b/include/asm-ia64/rmap.h
@@ -1,7 +1,7 @@
-#ifndef _IA64_RMAP_H
-#define _IA64_RMAP_H
+#ifndef _ASM_IA64_RMAP_H
+#define _ASM_IA64_RMAP_H
 
 /* nothing to see, move along */
 #include <asm-generic/rmap.h>
 
-#endif
+#endif /* _ASM_IA64_RMAP_H */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index 22da42c46412..de4a8b6eef90 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -7,12 +7,12 @@
  */
 
 struct scatterlist {
-	char *orig_address;	/* for use by swiotlb */
-
-	/* These two are only valid if ADDRESS member of this struct is NULL. */
 	struct page *page;
 	unsigned int offset;
 	unsigned int length;	/* buffer length */
+
+	dma_addr_t dma_address;
+	unsigned int dma_length;
 };
 
 #define ISA_DMA_THRESHOLD	(~0UL)
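The rewritten `start_thread()` in processor.h above folds six PSR bitfield writes into one read-modify-write over `cr_ipsr`, using the new kregs.h masks. Its net effect reads roughly as the following function (a sketch over those masks, not kernel code):

```c
/* Sketch: net effect of the new start_thread() PSR setup (illustrative). */
static unsigned long user_initial_psr_sketch(unsigned long psr)
{
	/* dfh=1 (fph disabled), cpl=3 (user mode), sp=1 (secure perfmon) */
	psr |= IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP;
	/* clear debug/trap inheritance bits, return-slot number, and IA-32 mode */
	psr &= ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
	return psr;
}
```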
diff --git a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h
index 5f8042925c64..2d123f4d2aeb 100644
--- a/include/asm-ia64/serial.h
+++ b/include/asm-ia64/serial.h
@@ -1,6 +1,6 @@
 /*
  * include/asm-ia64/serial.h
- * 
+ *
  * Derived from the i386 version.
  */
 
@@ -35,7 +35,7 @@
 #else
 #define RS_TABLE_SIZE
 #endif
- 
+
 /*
  * The following define the access methods for the HUB6 card. All
  * access is through two ports for all 24 possible chips.  The card is
@@ -115,21 +115,8 @@
 #define HUB6_SERIAL_PORT_DFNS
 #endif
 
-#ifdef CONFIG_MCA
-#define MCA_SERIAL_PORT_DFNS \
-	{ 0, BASE_BAUD, 0x3220, 3, STD_COM_FLAGS },	\
-	{ 0, BASE_BAUD, 0x3228, 3, STD_COM_FLAGS },	\
-	{ 0, BASE_BAUD, 0x4220, 3, STD_COM_FLAGS },	\
-	{ 0, BASE_BAUD, 0x4228, 3, STD_COM_FLAGS },	\
-	{ 0, BASE_BAUD, 0x5220, 3, STD_COM_FLAGS },	\
-	{ 0, BASE_BAUD, 0x5228, 3, STD_COM_FLAGS },
-#else
-#define MCA_SERIAL_PORT_DFNS
-#endif
-
 #define SERIAL_PORT_DFNS		\
 	STD_SERIAL_PORT_DEFNS		\
 	EXTRA_SERIAL_PORT_DEFNS		\
-	HUB6_SERIAL_PORT_DFNS		\
-	MCA_SERIAL_PORT_DFNS
+	HUB6_SERIAL_PORT_DFNS
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 76698ee6028f..0ae7768fd7de 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -17,6 +17,7 @@
 #include <linux/threads.h>
 #include <linux/kernel.h>
 
+#include <asm/bitops.h>
 #include <asm/io.h>
 #include <asm/param.h>
 #include <asm/processor.h>
@@ -36,6 +37,7 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __initdata;
 
+extern unsigned long phys_cpu_present_map;
 extern volatile unsigned long cpu_online_map;
 extern unsigned long ipi_base_addr;
 extern unsigned char smp_int_redirect;
@@ -45,23 +47,26 @@ extern volatile int ia64_cpu_to_sapicid[];
 
 extern unsigned long ap_wakeup_vector;
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-extern inline unsigned int num_online_cpus(void)
+#define cpu_possible(cpu)	(phys_cpu_present_map & (1UL << (cpu)))
+#define cpu_online(cpu)		(cpu_online_map & (1UL << (cpu)))
+
+static inline unsigned int
+num_online_cpus (void)
 {
 	return hweight64(cpu_online_map);
 }
 
-extern inline int any_online_cpu(unsigned int mask)
+static inline int
+any_online_cpu (unsigned int mask)
 {
 	if (mask & cpu_online_map)
 		return __ffs(mask & cpu_online_map);
-
 	return -1;
 }
 
 /*
- * Function to map hard smp processor id to logical id.  Slow, so
- * don't use this in performance-critical code.
+ * Function to map hard smp processor id to logical id.  Slow, so don't use this in
+ * performance-critical code.
  */
 static inline int
 cpu_logical_id (int cpuid)
@@ -120,11 +125,10 @@ hard_smp_processor_id (void)
 }
 
 /* Upping and downing of CPUs */
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern int __cpu_up(unsigned int cpu);
-
-#define NO_PROC_ID		0xffffffff	/* no processor magic marker */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern int __cpu_up (unsigned int cpu);
+extern void __init smp_build_cpu_map(void);
 
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
diff --git a/include/asm-ia64/smplock.h b/include/asm-ia64/smplock.h
index d5b5222b344b..103185f86e30 100644
--- a/include/asm-ia64/smplock.h
+++ b/include/asm-ia64/smplock.h
@@ -14,11 +14,6 @@ extern spinlock_t kernel_flag;
 
 #ifdef CONFIG_SMP
 # define kernel_locked()	spin_is_locked(&kernel_flag)
-# define check_irq_holder(cpu)			\
-do {						\
-	if (global_irq_holder == (cpu))		\
-		BUG();				\
-} while (0)
 #else
 # define kernel_locked()	(1)
 #endif
@@ -26,12 +21,10 @@ do { \
 /*
  * Release global kernel lock and global interrupt lock
  */
-#define release_kernel_lock(task, cpu)		\
+#define release_kernel_lock(task)		\
 do {						\
-	if (unlikely(task->lock_depth >= 0)) {	\
+	if (unlikely(task->lock_depth >= 0))	\
 		spin_unlock(&kernel_flag);	\
-		check_irq_holder(cpu);		\
-	}					\
 } while (0)
 
 /*
diff --git a/include/asm-ia64/softirq.h b/include/asm-ia64/softirq.h
index 1ab796be81fe..a50f2d240aac 100644
--- a/include/asm-ia64/softirq.h
+++ b/include/asm-ia64/softirq.h
@@ -4,22 +4,23 @@
 #include <linux/compiler.h>
 
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+
 #include <asm/hardirq.h>
 
-#define __local_bh_enable()	do { barrier(); really_local_bh_count()--; } while (0)
+#define __local_bh_enable()	do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 
-#define local_bh_disable()	do { really_local_bh_count()++; barrier(); } while (0)
-#define local_bh_enable()							\
-do {										\
-	__local_bh_enable();							\
-	if (unlikely(local_softirq_pending()) && really_local_bh_count() == 0)	\
-		do_softirq();							\
+#define local_bh_disable()	do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define local_bh_enable()						\
+do {									\
+	__local_bh_enable();						\
+	if (unlikely(!in_interrupt() && local_softirq_pending()))	\
+		do_softirq();						\
+	preempt_check_resched();					\
 } while (0)
 
-#define in_softirq()		(really_local_bh_count() != 0)
-
 #endif /* _ASM_IA64_SOFTIRQ_H */
diff --git a/include/asm-ia64/suspend.h b/include/asm-ia64/suspend.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/include/asm-ia64/suspend.h
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 9cc45e562991..4e55d3f333e2 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -13,9 +13,11 @@
  * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  */
 #include <linux/config.h>
+#include <linux/percpu.h>
 
 #include <asm/kregs.h>
 #include <asm/page.h>
+#include <asm/pal.h>
 
 #define KERNEL_START		(PAGE_OFFSET + 68*1024*1024)
 
@@ -102,6 +104,8 @@ ia64_insn_group_barrier (void)
 #define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
 #define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)
 
+#define safe_halt()		ia64_pal_halt(1)	/* PAL_HALT */
+
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
@@ -168,27 +172,7 @@ do { \
 #endif /* !CONFIG_IA64_DEBUG_IRQ */
 
 #define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
-
-#define local_irq_disable()	local_irq_disable ()
 #define local_save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
-#define local_irq_save(flags)	local_irq_save(flags)
-#define save_and_cli(flags)	local_irq_save(flags)
-
-#ifdef CONFIG_SMP
-  extern void __global_cli (void);
-  extern void __global_sti (void);
-  extern unsigned long __global_save_flags (void);
-  extern void __global_restore_flags (unsigned long);
-# define cli()			__global_cli()
-# define sti()			__global_sti()
-# define save_flags(flags)	((flags) = __global_save_flags())
-# define restore_flags(flags)	__global_restore_flags(flags)
-#else /* !CONFIG_SMP */
-# define cli()			local_irq_disable()
-# define sti()			local_irq_enable()
-# define save_flags(flags)	local_save_flags(flags)
-# define restore_flags(flags)	local_irq_restore(flags)
-#endif /* !CONFIG_SMP */
 
 /*
  * Force an unresolved reference if someone tries to use
@@ -376,7 +360,7 @@ static inline void ia32_load_state(struct task_struct *t __attribute__((unused))
  * newly created thread returns directly to
  * ia64_ret_from_syscall_clear_r8.
  */
-extern void ia64_switch_to (void *next_task);
+extern struct task_struct *ia64_switch_to (void *next_task);
 
 struct task_struct;
 
@@ -384,19 +368,20 @@ extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PERFMON)
-# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0)
+  extern int __per_cpu_data pfm_syst_wide;
+# define PERFMON_IS_SYSWIDE() (this_cpu(pfm_syst_wide) != 0)
 #else
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
 
-#define __switch_to(prev,next) do {							\
+#define __switch_to(prev,next,last) do {						\
 	if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
	    || IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE())		\
		ia64_save_extra(prev);							\
	if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
	    || IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE())		\
		ia64_load_extra(next);							\
-	ia64_switch_to((next));								\
+	(last) = ia64_switch_to((next));						\
 } while (0)
 
 #ifdef CONFIG_SMP
@@ -411,19 +396,28 @@ extern void ia64_load_extra (struct task_struct *task);
  * task->thread.fph, avoiding the complication of having to fetch
  * the latest fph state from another CPU.
  */
-# define switch_to(prev,next) do {						\
-	if (ia64_psr(ia64_task_regs(prev))->mfh) {				\
-		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
-		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
-		__ia64_save_fpu((prev)->thread.fph);				\
-	}									\
-	ia64_psr(ia64_task_regs(prev))->dfh = 1;				\
-	__switch_to(prev,next);							\
+# define switch_to(prev,next,last) do {						\
+	if (ia64_psr(ia64_task_regs(prev))->mfh) {				\
+		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
+		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
+		__ia64_save_fpu((prev)->thread.fph);				\
+		(prev)->thread.last_fph_cpu = smp_processor_id();		\
+	}									\
+	if ((next)->thread.flags & IA64_THREAD_FPH_VALID) {			\
+		if (((next)->thread.last_fph_cpu == smp_processor_id())	\
+		    && (ia64_get_fpu_owner() == next))				\
+		{								\
+			ia64_psr(ia64_task_regs(next))->dfh = 0;		\
+			ia64_psr(ia64_task_regs(next))->mfh = 0;		\
+		} else								\
+			ia64_psr(ia64_task_regs(next))->dfh = 1;		\
+	}									\
+	__switch_to(prev,next,last);						\
 } while (0)
 #else
-# define switch_to(prev,next) do {						\
+# define switch_to(prev,next,last) do {						\
 	ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next));	\
-	__switch_to(prev,next);							\
+	__switch_to(prev,next,last);						\
 } while (0)
 #endif
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index d9d97ef65969..ea335eb1b553 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -1,7 +1,202 @@
-/* XXX fix me! */
+#ifndef _ASM_IA64_TLB_H
+#define _ASM_IA64_TLB_H
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * This file was derived from asm-generic/tlb.h.
+ */
+/*
+ * Removing a translation from a page table (including TLB-shootdown) is a four-step
+ * procedure:
+ *
+ *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
+ *	    (this is a no-op on ia64).
+ *	(2) Clear the relevant portions of the page-table
+ *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
+ *	(4) Release the pages that were freed up in step (2).
+ *
+ * Note that the ordering of these steps is crucial to avoid races on MP machines.
+ *
+ * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
+ * unmapping a portion of the virtual address space, these hooks are called according to
+ * the following template:
+ *
+ *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	{
+ *	  for each vma that needs a shootdown do {
+ *	    tlb_start_vma(tlb, vma);
+ *	      for each page-table-entry PTE that needs to be removed do {
+ *		tlb_remove_tlb_entry(tlb, pte, address);
+ *		if (pte refers to a normal page) {
+ *		  tlb_remove_page(tlb, page);
+ *		}
+ *	      }
+ *	    tlb_end_vma(tlb, vma);
+ *	  }
+ *	}
+ *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
+ */
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_SMP
+# define FREE_PTE_NR		2048
+# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
+#else
+# define FREE_PTE_NR		0
+# define tlb_fast_mode(tlb)	(1)
+#endif
+
+typedef struct {
+	struct mm_struct	*mm;
+	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		fullmm;		/* non-zero means full mm flush */
+	unsigned long		freed;		/* number of pages freed */
+	unsigned long		start_addr;
+	unsigned long		end_addr;
+	struct page		*pages[FREE_PTE_NR];
+} mmu_gather_t;
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+extern mmu_gather_t	mmu_gathers[NR_CPUS];
+
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
+{
+	unsigned int nr;
+
+	if (tlb->fullmm) {
+		/*
+		 * Tearing down the entire address space.  This happens both as a result
+		 * of exit() and execve().  The latter case necessitates the call to
+		 * flush_tlb_mm() here.
+		 */
+		flush_tlb_mm(tlb->mm);
+	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
+			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
+	{
+		/*
+		 * If we flush more than a tera-byte or across regions, we're probably
+		 * better off just flushing the entire TLB(s).  This should be very rare
+		 * and is not worth optimizing for.
+		 */
+		flush_tlb_all();
+	} else {
+		/*
+		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
+		 * vma pointer.
+		 */
+		struct vm_area_struct vma;
+
+		vma.vm_mm = tlb->mm;
+		/* flush the address range from the tlb: */
+		flush_tlb_range(&vma, start, end);
+		/* now flush the virt. page-table area mapping the address range: */
+		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
+	}
+
+	/* lastly, release the freed pages */
+	nr = tlb->nr;
+	if (!tlb_fast_mode(tlb)) {
+		unsigned long i;
+		tlb->nr = 0;
+		tlb->start_addr = ~0UL;
+		for (i = 0; i < nr; ++i)
+			free_page_and_swap_cache(tlb->pages[i]);
+	}
+}
+
+/*
+ * Return a pointer to an initialized mmu_gather_t.
+ */
+static inline mmu_gather_t *
+tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+{
+	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
+
+	tlb->mm = mm;
+	/*
+	 * Use fast mode if only 1 CPU is online.
+	 *
+	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
+	 * doesn't work because of speculative accesses and software prefetching: the page
+	 * table of "mm" may (and usually is) the currently active page table and even
+	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
+	 * compiler might use speculation or lfetch.fault on what happens to be a valid
+	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
+	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
+	 * problems.  (We could make fast-mode work by switching the current task to a
+	 * different "mm" during the shootdown.) --davidm 08/02/2002
+	 */
+	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->fullmm = full_mm_flush;
+	tlb->freed = 0;
+	tlb->start_addr = ~0UL;
+	return tlb;
+}
+
+/*
+ * Called at the end of the shootdown operation to free up any resources that were
+ * collected.  The page table lock is still held at this point.
+ */
+static inline void
+tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
+{
+	unsigned long freed = tlb->freed;
+	struct mm_struct *mm = tlb->mm;
+	unsigned long rss = mm->rss;
+
+	if (rss < freed)
+		freed = rss;
+	mm->rss = rss - freed;
+	/*
+	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
+	 * tlb->end_addr.
+	 */
+	ia64_tlb_flush_mmu(tlb, start, end);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+}
+
+/*
+ * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
+ * PTE, not just those pointing to (normal) physical memory.
+ */
+static inline void
+tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
+{
+	if (tlb->start_addr == ~0UL)
+		tlb->start_addr = address;
+	tlb->end_addr = address + PAGE_SIZE;
+}
+
+/*
+ * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
+ * must be delayed until after the TLB has been flushed (see comments at the beginning of
+ * this file).
+ */
+static inline void
+tlb_remove_page (mmu_gather_t *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+		return;
+	}
+	tlb->pages[tlb->nr++] = page;
+	if (tlb->nr >= FREE_PTE_NR)
+		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
 
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
-#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)
 
-#include <asm-generic/tlb.h>
+#endif /* _ASM_IA64_TLB_H */
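The shootdown template in the tlb.h header comment maps onto a caller roughly as follows; `lookup_pte()` and `pte_is_normal_page()` are hypothetical stand-ins for the real page-table walk (a sketch under those assumptions, not kernel code):

```c
/* Sketch of a caller driving the mmu_gather hooks defined above. */
static void unmap_region_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	unsigned long addr;
	mmu_gather_t *tlb = tlb_gather_mmu(mm, 0);	/* 0: not a full-mm flush */

	tlb_start_vma(tlb, vma);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *pte = lookup_pte(mm, addr);	/* hypothetical helper */
		if (!pte)
			continue;
		tlb_remove_tlb_entry(tlb, pte, addr);	/* record range to flush */
		if (pte_is_normal_page(*pte))		/* hypothetical predicate */
			tlb_remove_page(tlb, pte_page(*pte));	/* defer the free */
	}
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, start, end);	/* flush TLB, then release pages */
}
```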
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 4e2404ef4e36..a6ab64b738a8 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -60,6 +60,8 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 #else
 	if (vma->vm_mm == current->active_mm)
 		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+	else
+		vma->vm_mm->context = 0;
 #endif
 }
 
@@ -70,12 +72,10 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 static inline void
 flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	struct vm_area_struct vma;
-
-	if (REGION_NUMBER(start) != REGION_NUMBER(end))
-		printk("flush_tlb_pgtables: can't flush across regions!!\n");
-	vma.vm_mm = mm;
-	flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
+	/*
+	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
+	 * interface (see tlb.h).
+	 */
 }
 
 #define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 51b4034527de..970db22ffa1b 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -223,6 +223,10 @@
 #define __NR_sched_setaffinity		1231
 #define __NR_sched_getaffinity		1232
 #define __NR_security			1233
+#define __NR_get_large_pages		1234
+#define __NR_free_large_pages		1235
+#define __NR_share_large_pages		1236
+#define __NR_unshare_large_pages	1237
 
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
