author    Paul Mackerras <paulus@samba.org>  2002-07-28 07:39:12 +1000
committer Paul Mackerras <paulus@samba.org>  2002-07-28 07:39:12 +1000
commit    0f4d7d658f3a51c745017ac4ccd6d5c682de4d0f (patch)
tree      8094b2787c56e0ac4b24b61b827e12a2da3eb627 /include
parent    e9cdd788140681db68b1c541b857dd60a08ee03d (diff)
parent    9a692aefc8d67c2300f7949b90dda93db7301f0b (diff)
Merge samba.org:/home/paulus/kernel/linux-2.5
into samba.org:/home/paulus/kernel/for-linus-ppc
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ppc/hardirq.h      | 134
-rw-r--r--  include/asm-ppc/hw_irq.h       |  20
-rw-r--r--  include/asm-ppc/pgtable.h      |  17
-rw-r--r--  include/asm-ppc/rwsem.h        |  16
-rw-r--r--  include/asm-ppc/smplock.h      |   2
-rw-r--r--  include/asm-ppc/softirq.h      |  26
-rw-r--r--  include/asm-ppc/system.h       |  21
-rw-r--r--  include/asm-ppc/thread_info.h  |  20
-rw-r--r--  include/asm-ppc/tlb.h          |  59
-rw-r--r--  include/asm-ppc/tlbflush.h     |   5
10 files changed, 185 insertions(+), 135 deletions(-)
diff --git a/include/asm-ppc/hardirq.h b/include/asm-ppc/hardirq.h
index bbe5bfb1cc54..6d365b537879 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-ppc/hardirq.h
@@ -15,8 +15,6 @@
*/
typedef struct {
unsigned long __softirq_pending; /* set_bit is used on this */
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
unsigned int __last_jiffy_stamp;
@@ -25,91 +23,87 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
+
/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00ff0000
*/
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-#ifndef CONFIG_SMP
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 8
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define hardirq_enter(cpu) do { preempt_disable(); local_irq_count(cpu)++; } while (0)
-#define hardirq_exit(cpu) do { local_irq_count(cpu)--; preempt_enable(); } while (0)
+#define __MASK(x) ((1UL << (x))-1)
-#define synchronize_irq() do { } while (0)
-#define release_irqlock(cpu) do { } while (0)
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#else /* CONFIG_SMP */
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-#include <asm/atomic.h>
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock;
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
-static inline int irqs_running (void)
-{
- int i;
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
- for (i = 0; i < NR_CPUS; i++)
- if (local_irq_count(i))
- return 1;
- return 0;
-}
-static inline void release_irqlock(int cpu)
-{
- /* if we didn't own the irq lock, just ignore.. */
- if (global_irq_holder == (unsigned char) cpu) {
- global_irq_holder = NO_PROC_ID;
- clear_bit(0,&global_irq_lock);
- }
-}
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
-static inline void hardirq_enter(int cpu)
-{
- unsigned int loops = 10000000;
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
- preempt_disable();
- ++local_irq_count(cpu);
- while (test_bit(0,&global_irq_lock)) {
- if (cpu == global_irq_holder) {
- printk("uh oh, interrupt while we hold global irq lock! (CPU %d)\n", cpu);
-#ifdef CONFIG_XMON
- xmon(0);
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
- break;
- }
- if (loops-- == 0) {
- printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
-#ifdef CONFIG_XMON
- xmon(0);
-#endif
- }
- }
-}
-
-static inline void hardirq_exit(int cpu)
-{
- --local_irq_count(cpu);
- preempt_enable();
-}
-
-static inline int hardirq_trylock(int cpu)
-{
- return !test_bit(0,&global_irq_lock);
-}
-
-#define hardirq_endlock(cpu) do { } while (0)
-
-extern void synchronize_irq(void);
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ do_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq) barrier()
+#else
+ extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
+extern void show_stack(unsigned long *sp);
+
#endif /* __ASM_HARDIRQ_H */
#endif /* __KERNEL__ */
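
Note: the hardirq.h rewrite above folds the old per-CPU irq/bh counters into the single per-thread preempt_count word. A minimal user-space sketch of the same bit layout (the standalone preempt_count variable and the printfs are illustrative assumptions, not kernel code):

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	8

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __MASK(x)	((1UL << (x)) - 1)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

/* Stand-in for thread_info->preempt_count (assumption for the sketch). */
static unsigned long preempt_count;

int main(void)
{
	preempt_count += 1UL << HARDIRQ_SHIFT;	/* what irq_enter() does */
	printf("in_irq: %s\n", (preempt_count & HARDIRQ_MASK) ? "yes" : "no");
	printf("in_softirq: %s\n", (preempt_count & SOFTIRQ_MASK) ? "yes" : "no");
	preempt_count -= 1UL << HARDIRQ_SHIFT;	/* first step of irq_exit() */
	return 0;
}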
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
index e36d90289236..492de2c4c536 100644
--- a/include/asm-ppc/hw_irq.h
+++ b/include/asm-ppc/hw_irq.h
@@ -8,21 +8,18 @@
#ifndef _PPC_HW_IRQ_H
#define _PPC_HW_IRQ_H
-extern unsigned long timer_interrupt_intercept;
-extern unsigned long do_IRQ_intercept;
-extern int timer_interrupt(struct pt_regs *);
+extern void timer_interrupt(struct pt_regs *);
extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
#define INLINE_IRQS
-#ifdef INLINE_IRQS
-
#define mfmsr() ({unsigned int rval; \
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
-#define local_save_flags(flags) ((flags) = mfmsr())
-#define local_irq_restore(flags) mtmsr(flags)
+#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+
+#ifdef INLINE_IRQS
static inline void local_irq_disable(void)
{
@@ -40,7 +37,7 @@ static inline void local_irq_enable(void)
mtmsr(msr | MSR_EE);
}
-static inline void __do_save_and_cli(unsigned long *flags)
+static inline void local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
@@ -49,7 +46,9 @@ static inline void __do_save_and_cli(unsigned long *flags)
__asm__ __volatile__("": : :"memory");
}
-#define local_irq_save(flags) __do_save_and_cli(&flags)
+#define local_save_flags(flags) ((flags) = mfmsr())
+#define local_irq_save(flags) local_irq_save_ptr(&flags)
+#define local_irq_restore(flags) mtmsr(flags)
#else
@@ -57,9 +56,8 @@ extern void local_irq_enable(void);
extern void local_irq_disable(void);
extern void local_irq_restore(unsigned long);
extern void local_save_flags_ptr(unsigned long *);
-extern unsigned long local_irq_enable_end, local_irq_disable_end, local_irq_restore_end, local_save_flags_ptr_end;
-#define local_save_flags(flags) local_save_flags_ptr((unsigned long *)&flags)
+#define local_save_flags(flags) local_save_flags_ptr(&flags)
#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
#endif
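
Note: both the inline and out-of-line variants keep the nesting-safe save/restore pairing. A hedged sketch of the intended calling pattern (kernel context; the helper name and body are hypothetical):

static void mydev_poke_registers(void)	/* hypothetical helper */
{
	unsigned long flags;

	local_irq_save(flags);		/* save MSR, clear MSR_EE */
	/* ... touch state shared with this CPU's interrupt handlers ... */
	local_irq_restore(flags);	/* re-enables EE only if it was set before */
}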
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index c5bfc1332e5f..a5ee2c62decc 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
#define _PAGE_KERNEL _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
#define _PAGE_IO _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
+#define _PAGE_RAM _PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT _PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT (_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
+
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
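
Note: one nit on the new _PAGE_RAM_TEXT: as defined it expands unparenthesized, so an expression like (flags & _PAGE_RAM_TEXT) binds as ((flags & (_PAGE_RAM & ~_PAGE_WRENABLE)) | _PAGE_USER). A fully parenthesized form (suggested variant, not what the patch applies):

#define _PAGE_RAM_TEXT	((_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER)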
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
index b87a52945fdc..faf9928b682a 100644
--- a/include/asm-ppc/rwsem.h
+++ b/include/asm-ppc/rwsem.h
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.rwsem.h 1.6 05/17/01 18:14:25 cort
+ * BK Id: %F% %I% %G% %U% %#%
*/
/*
* include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
@@ -55,6 +55,7 @@ struct rw_semaphore {
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
static inline void init_rwsem(struct rw_semaphore *sem)
{
@@ -125,6 +126,19 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
}
/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ smp_wmb();
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
* implement exchange and add functionality
*/
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
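
Note: the new __downgrade_write() backs the generic downgrade_write() wrapper. A hedged sketch of the intended pattern (the data-update bodies are placeholders):

static DECLARE_RWSEM(sem);	/* assumes the linux/rwsem.h initializer */

	down_write(&sem);	/* exclusive: update the protected data */
	/* ... modify the structure ... */
	downgrade_write(&sem);	/* atomically become a reader; takes the
				 * __downgrade_write() path added above */
	/* ... read alongside other readers; no writer can slip in ... */
	up_read(&sem);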
diff --git a/include/asm-ppc/smplock.h b/include/asm-ppc/smplock.h
index 469d3db3f651..e9a3e6487a4f 100644
--- a/include/asm-ppc/smplock.h
+++ b/include/asm-ppc/smplock.h
@@ -18,7 +18,7 @@ extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#elif defined(CONFIG_PREEMPT)
-#define kernel_locked() preempt_get_count()
+#define kernel_locked() preempt_count()
#endif
/*
diff --git a/include/asm-ppc/softirq.h b/include/asm-ppc/softirq.h
index d78ec6f37d2e..3a95c80a1327 100644
--- a/include/asm-ppc/softirq.h
+++ b/include/asm-ppc/softirq.h
@@ -5,34 +5,30 @@
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H
-#include <asm/atomic.h>
+#include <linux/preempt.h>
#include <asm/hardirq.h>
#define local_bh_disable() \
do { \
- preempt_disable(); \
- local_bh_count(smp_processor_id())++; \
+ preempt_count() += SOFTIRQ_OFFSET; \
barrier(); \
} while (0)
#define __local_bh_enable() \
do { \
barrier(); \
- local_bh_count(smp_processor_id())--; \
- preempt_enable(); \
+ preempt_count() -= SOFTIRQ_OFFSET; \
} while (0)
-#define local_bh_enable() \
-do { \
- barrier(); \
- if (!--local_bh_count(smp_processor_id()) \
- && softirq_pending(smp_processor_id())) { \
- do_softirq(); \
- } \
- preempt_enable(); \
+#define local_bh_enable() \
+do { \
+ __local_bh_enable(); \
+ if (unlikely(!in_interrupt() \
+ && softirq_pending(smp_processor_id()))) \
+ do_softirq(); \
+ if (preempt_count() == 0) \
+ preempt_check_resched(); \
} while (0)
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
#endif /* __ASM_SOFTIRQ_H */
#endif /* __KERNEL__ */
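
Note: the reworked local_bh_enable() now also drains softirqs raised while they were blocked. A hedged sketch of the usual caller (the per-CPU data being protected is a placeholder):

	local_bh_disable();	/* adds SOFTIRQ_OFFSET to preempt_count */
	/* ... touch per-CPU data also used from softirq context ... */
	local_bh_enable();	/* drops the count, runs do_softirq() if
				 * anything was raised meanwhile, then
				 * checks for a pending reschedule */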
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 00fafc7226c1..275e03e10300 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -96,27 +96,6 @@ extern unsigned int rtas_data;
struct pt_regs;
extern void dump_regs(struct pt_regs *);
-#ifndef CONFIG_SMP
-
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-#define save_flags(flags) local_save_flags(flags)
-#define restore_flags(flags) local_irq_restore(flags)
-#define save_and_cli(flags) local_irq_save(flags)
-
-#else /* CONFIG_SMP */
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
-#endif /* !CONFIG_SMP */
-
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
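
Note: with the cli()/sti() family removed, callers are expected to convert to the local_irq_* forms, plus a real lock for SMP-shared data. A hedged conversion sketch (the lock name is hypothetical):

	/* old style, removed above:
	 *	save_flags(flags); cli();
	 *	...
	 *	restore_flags(flags);
	 */
	static spinlock_t mydev_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical */
	unsigned long flags;

	spin_lock_irqsave(&mydev_lock, flags);	/* local disable + SMP exclusion */
	/* ... critical section ... */
	spin_unlock_irqrestore(&mydev_lock, flags);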
diff --git a/include/asm-ppc/thread_info.h b/include/asm-ppc/thread_info.h
index 90003b433cbe..a89f707e903c 100644
--- a/include/asm-ppc/thread_info.h
+++ b/include/asm-ppc/thread_info.h
@@ -22,25 +22,25 @@ struct thread_info {
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
- int preempt_count; /* not used at present */
- int softirq_count;
- int hardirq_count;
+ int preempt_count;
};
-/*
- * macros/functions for gaining access to the thread information structure
- */
#define INIT_THREAD_INFO(tsk) \
{ \
- task: &tsk, \
- exec_domain: &default_exec_domain, \
- flags: 0, \
- cpu: 0, \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1 \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/include/asm-ppc/tlb.h b/include/asm-ppc/tlb.h
index ba4ea097a73e..fd7cd0b460cc 100644
--- a/include/asm-ppc/tlb.h
+++ b/include/asm-ppc/tlb.h
@@ -1,4 +1,61 @@
/*
- * BK Id: SCCS/s.tlb.h 1.5 05/17/01 18:14:26 cort
+ * TLB shootdown specifics for PPC
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
*/
+#ifndef _PPC_TLB_H
+#define _PPC_TLB_H
+
+#include <linux/config.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC_STD_MMU
+/* Classic PPC with hash-table based MMU... */
+
+struct free_pte_ctx;
+extern void tlb_flush(struct free_pte_ctx *tlb);
+
+/* Get the generic bits... */
#include <asm-generic/tlb.h>
+
+/* Nothing needed here in fact... */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+ unsigned long address);
+
+static inline void tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(tlb->mm, ptep, address);
+}
+
+#else
+/* Embedded PPC with software-loaded TLB, very simple... */
+
+struct flush_tlb_arch { };
+
+#define tlb_init_arch(tlb, full_flush) do { } while (0)
+#define tlb_finish_arch(tlb) do { } while (0)
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* Get the generic bits... */
+#include <asm-generic/tlb.h>
+
+#endif /* CONFIG_PPC_STD_MMU */
+
+#endif /* _PPC_TLB_H */
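
Note: for context, a hedged sketch of how the generic mmu_gather machinery drives the hooks defined above during unmap (signatures assumed from the asm-generic/tlb.h of this era):

	mmu_gather_t *tlb = tlb_gather_mmu(mm, 0);	/* 0: not a full-mm teardown */

	tlb_start_vma(tlb, vma);			/* no-op on PPC, per above */
	tlb_remove_tlb_entry(tlb, ptep, address);	/* hash MMU: flush the hash PTE */
	tlb_remove_page(tlb, page);			/* queue the page for freeing */
	tlb_end_vma(tlb, vma);				/* no-op on PPC */

	tlb_finish_mmu(tlb, start, end);		/* invokes tlb_flush(), frees pages */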
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
index af4295e46861..9c06b88c45d9 100644
--- a/include/asm-ppc/tlbflush.h
+++ b/include/asm-ppc/tlbflush.h
@@ -22,8 +22,6 @@ extern void _tlbia(void);
#if defined(CONFIG_4xx)
-static inline void flush_tlb_all(void)
- { _tlbia(); }
static inline void flush_tlb_mm(struct mm_struct *mm)
{ _tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -40,8 +38,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#elif defined(CONFIG_8xx)
#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
-static inline void flush_tlb_all(void)
- { __tlbia(); }
static inline void flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -58,7 +54,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#else /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
-extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,