author    Geert Uytterhoeven <geert@linux-m68k.org>    2002-11-02 23:52:48 -0800
committer Linus Torvalds <torvalds@penguin.transmeta.com>    2002-11-02 23:52:48 -0800
commit    3e37014c1b2ea48103c7df8a29397c37ed678661 (patch)
tree      5f24e97e5d06c19e943c18f997221855714c9524 /include
parent    277963a40a5dc844b191337419df5e7c7b6285ef (diff)
[PATCH] M68k irq updates
M68k irq updates (most from Roman Zippel):
- Update m68k to new irq API in 2.5.29
- synchronize_irq() takes a parameter now
- Remove obsolete definitions
- Avoid too much inline assembly in __xchg
- Add irqs_disabled(), which appeared in 2.5.34
- Add missing implementation for in_atomic() (introduced in 2.5.35)
Diffstat (limited to 'include')
-rw-r--r--  include/asm-m68k/hardirq.h  81
-rw-r--r--  include/asm-m68k/softirq.h  19
-rw-r--r--  include/asm-m68k/system.h   74
3 files changed, 117 insertions, 57 deletions
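
One of the changes called out in the commit message is that synchronize_irq() now takes the irq number as a parameter. A minimal, hypothetical driver-side sketch of the new calling convention (the device structure and field names are made up for illustration; only the synchronize_irq(irq) form comes from this patch):

/* Hypothetical teardown path showing the parameterised synchronize_irq(). */
static void example_shutdown(struct example_dev *dev)
{
	disable_irq(dev->irq);          /* mask the interrupt line             */
	synchronize_irq(dev->irq);      /* wait for handlers already in flight */
	free_irq(dev->irq, dev);        /* release the line                    */
}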
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index 6b2248372f78..5a5db2a7718e 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -7,24 +7,87 @@
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define in_interrupt() (local_irq_count(smp_processor_id()) + local_bh_count(smp_processor_id()) != 0)
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00ff0000
+ */
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 8
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define irq_enter(cpu) (local_irq_count(cpu)++)
-#define irq_exit(cpu) (local_irq_count(cpu)--)
+#define __MASK(x) ((1UL << (x))-1)
-#define synchronize_irq() barrier()
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+
+
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
+
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define in_atomic() (preempt_count() != kernel_locked())
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define in_atomic() (preempt_count() != 0)
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ do_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
+
+#define synchronize_irq(irq) barrier()
#endif
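
The comment block added above packs three counters into a single preempt count word. As a rough illustration of that bit layout (a sketch only; the sample value is invented, the masks are the ones defined in this header):

/* Decomposing a sample preempt count with the masks defined above. */
unsigned long sample = (2UL << HARDIRQ_SHIFT) | (1UL << SOFTIRQ_SHIFT) | 3UL;
/* sample & HARDIRQ_MASK == 0x00020000 : two nested hard interrupts         */
/* sample & SOFTIRQ_MASK == 0x00000100 : softirq processing in progress     */
/* sample & PREEMPT_MASK == 0x00000003 : preempt_disable() nesting depth 3  */
/* irq_count() applies (HARDIRQ_MASK | SOFTIRQ_MASK) to preempt_count(),    */
/* so in_interrupt() would be true for a count like this one.               */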
diff --git a/include/asm-m68k/softirq.h b/include/asm-m68k/softirq.h
index c35c800e3c0d..0c2f0e2044c6 100644
--- a/include/asm-m68k/softirq.h
+++ b/include/asm-m68k/softirq.h
@@ -6,14 +6,19 @@
*/
#include <asm/atomic.h>
+#include <asm/hardirq.h>
-#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
+#define local_bh_disable() \
+ do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+ do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define __local_bh_enable() local_bh_enable()
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+#define local_bh_enable() \
+do { \
+ __local_bh_enable(); \
+ if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
+ do_softirq(); \
+ preempt_check_resched(); \
+} while (0)
#endif
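
The softirq.h rewrite above turns local_bh_disable()/local_bh_enable() into manipulations of the softirq field of the preempt count, with local_bh_enable() flushing any pending softirqs. A minimal usage sketch of the exclusion this provides (the protected counter is hypothetical):

/* Sketch: data also touched from softirq context on the same CPU. */
static unsigned long example_counter;

static void example_update(void)
{
	local_bh_disable();     /* preempt_count() += SOFTIRQ_OFFSET: softirqs held off  */
	example_counter++;      /* safe against this CPU's softirq handlers              */
	local_bh_enable();      /* drops the count; runs do_softirq() if work is pending */
}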
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 974db3b084b7..8fe94c6017fd 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -7,11 +7,6 @@
#include <asm/segment.h>
#include <asm/entry.h>
-#define prepare_arch_schedule(prev) do { } while(0)
-#define finish_arch_schedule(prev) do { } while(0)
-#define prepare_arch_switch(rq) do { } while(0)
-#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
-
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
@@ -52,24 +47,25 @@ asmlinkage void resume(void);
#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#else
#include <asm/hardirq.h>
-#define local_irq_enable() ({ \
- if (MACH_IS_Q40 || !local_irq_count(smp_processor_id())) \
- asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
+#define local_irq_enable() ({ \
+ if (MACH_IS_Q40 || !hardirq_count()) \
+ asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#endif
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
+static inline int irqs_disabled(void)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ return flags & ~ALLOWINT;
+}
+
/* For spinlocks etc */
#define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); })
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-#define save_flags(x) local_save_flags(x)
-#define restore_flags(x) local_irq_restore(x)
-#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
-
/*
* Force strict CPU ordering.
* Not really required on m68k...
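
The hunk above removes the legacy cli()/sti()/save_flags()/restore_flags() macros; callers are expected to use the local_irq_*() family instead. A hedged sketch of the call-site conversion (the shared state is made up for illustration):

/* Old style (removed above):         New style:
 *   save_flags(flags); cli();          local_irq_save(flags);
 *   ...critical section...             ...critical section...
 *   restore_flags(flags);              local_irq_restore(flags);
 */
static unsigned int example_state;      /* hypothetical shared state */

static void example_touch_state(void)
{
	unsigned long flags;

	local_irq_save(flags);          /* save SR, then raise the IPL on m68k */
	example_state++;
	local_irq_restore(flags);       /* put the saved SR back               */
}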
@@ -95,33 +91,29 @@ struct __xchg_dummy { unsigned long a[100]; };
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
- unsigned long tmp, flags;
-
- save_flags(flags);
- cli();
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "moveb %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "movew %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "movel %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- restore_flags(flags);
- return tmp;
+ unsigned long flags, tmp;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ tmp = *(u8 *)ptr;
+ *(u8 *)ptr = x;
+ break;
+ case 2:
+ tmp = *(u16 *)ptr;
+ *(u16 *)ptr = x;
+ break;
+ case 4:
+ tmp = *(u32 *)ptr;
+ *(u32 *)ptr = x;
+ break;
+ default:
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)