author	Pete Zaitcev <zaitcev@redhat.com>	2002-08-11 03:56:43 -0700
committer	David S. Miller <davem@nuts.ninka.net>	2002-08-11 03:56:43 -0700
commit	15c5cc6e25884d9fd528c93ddee8c05bccdebdb1 (patch)
tree	0ea42cd81296b9016af33ccbf48d6cc56d4758dd /include
parent	80e4e144134505f07319b4f6f1eef275f369912c (diff)
SPARC: More work to get sparc32 working in 2.5.x
- page-size PTE directory with 16-word pmd_t, as suggested by RMK and Riel (see the sketch below)
- support for the 2.5.x softirq infrastructure
- other miscellanea
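
For orientation, a minimal C sketch of the 16-word pmd_t idea: sixteen hardware SRMMU page-table descriptors packed into one software pmd_t, all pointing into a single page-size PTE directory. The helper name and exact constants are assumptions for illustration, not code from this patch.

/* Sketch only: one software pmd_t holds 16 hardware SRMMU PTDs, so the
 * sixteen 256-byte hardware PTE tables behind it live in one 4 KB page. */
typedef struct { unsigned long pmdv[16]; } pmd_t;

#define SRMMU_REAL_PTRS_PER_PTE	64	/* hw PTE table: 64 entries x 4 bytes */
#define SRMMU_ET_PTD		0x1	/* SRMMU descriptor type: page table */

static void pmd_set_sketch(pmd_t *pmdp, unsigned long pte_page_pa)
{
	unsigned long ptp = pte_page_pa >> 4;	/* SRMMU table-pointer encoding */
	int i;

	/* Each slot advances by 256 bytes (in >> 4 units), covering the
	 * whole 4 KB PTE page. */
	for (i = 0; i < 16; i++) {
		pmdp->pmdv[i] = SRMMU_ET_PTD | ptp;
		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(unsigned long)) >> 4;
	}
}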
Diffstat (limited to 'include')
-rw-r--r--	include/asm-sparc/hardirq.h	| 109
-rw-r--r--	include/asm-sparc/page.h	|  14
-rw-r--r--	include/asm-sparc/pgalloc.h	|  12
-rw-r--r--	include/asm-sparc/pgtable.h	|  39
-rw-r--r--	include/asm-sparc/pgtsrmmu.h	|   4
-rw-r--r--	include/asm-sparc/softirq.h	|  18
-rw-r--r--	include/asm-sparc/system.h	|   4
7 files changed, 122 insertions(+), 78 deletions(-)
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index d82222c892fc..a80212dc3a2a 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -12,47 +12,108 @@
#include <linux/brlock.h>
#include <linux/spinlock.h>
-/* entry.S is sensitive to the offsets of these fields */
+/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
typedef struct {
unsigned int __softirq_pending;
unsigned int __unused_1;
#ifndef CONFIG_SMP
- unsigned int __local_irq_count;
+ unsigned int WAS__local_irq_count;
#else
unsigned int __unused_on_SMP; /* DaveM says use brlock for SMP irq. KAO */
#endif
- unsigned int __local_bh_count;
+ unsigned int WAS__local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
-#ifndef CONFIG_SMP
-#define irq_enter(cpu, irq) ((void)(irq), local_irq_count(cpu)++)
-#define irq_exit(cpu, irq) ((void)(irq), local_irq_count(cpu)--)
-#else
-#undef local_irq_count
-#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
-#define irq_enter(cpu, irq) br_read_lock(BR_GLOBALIRQ_LOCK)
-#define irq_exit(cpu, irq) br_read_unlock(BR_GLOBALIRQ_LOCK)
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00ff0000
+ */
+
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 8
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
#endif
/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
*/
-#define in_interrupt() ((local_irq_count(smp_processor_id()) + \
- local_bh_count(smp_processor_id())) != 0)
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+
-/* This tests only the local processors hw IRQ context disposition. */
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
#ifndef CONFIG_SMP
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
-#define hardirq_trylock(cpu) ((void)(cpu), local_irq_count(smp_processor_id()) == 0)
-#define hardirq_endlock(cpu) do { (void)(cpu); } while(0)
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ do_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
+
+#else
+
+/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
+
+/* XXX This is likely to be broken by the above preempt-based IRQs */
+#define irq_enter() br_read_lock(BR_GLOBALIRQ_LOCK)
+#undef local_irq_count
+#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
+#define irq_exit() br_read_unlock(BR_GLOBALIRQ_LOCK)
+#endif
+
+#ifndef CONFIG_SMP
#define synchronize_irq() barrier()
@@ -79,17 +140,19 @@ static inline void release_irqlock(int cpu)
}
}
+#if 0
static inline int hardirq_trylock(int cpu)
{
spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
return (!local_irq_count(cpu) && !spin_is_locked(lock));
}
+#endif
-#define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
-
-extern void synchronize_irq(void);
+extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
+// extern void show_stack(unsigned long * esp);
+
#endif /* __SPARC_HARDIRQ_H */
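
As a cross-check of the bit layout above, a standalone sketch that decodes a sample counter value; the variable pcnt stands in for the kernel's per-task preempt_count(), so this is illustrative rather than kernel code.

#include <stdio.h>

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	16
#define __MASK(x)	((1UL << (x)) - 1)

int main(void)
{
	/* Two nested hardirqs, one softirq, preempt depth 1. */
	unsigned long pcnt = (2UL << HARDIRQ_SHIFT) |
			     (1UL << SOFTIRQ_SHIFT) |
			     (1UL << PREEMPT_SHIFT);

	printf("hardirq=%lu softirq=%lu preempt=%lu in_interrupt=%d\n",
	       (pcnt >> HARDIRQ_SHIFT) & __MASK(8),
	       (pcnt >> SOFTIRQ_SHIFT) & __MASK(8),
	       (pcnt >> PREEMPT_SHIFT) & __MASK(8),
	       (pcnt & ((__MASK(8) << SOFTIRQ_SHIFT) |
			(__MASK(8) << HARDIRQ_SHIFT))) != 0);
	return 0;	/* prints: hardirq=2 softirq=1 preempt=1 in_interrupt=1 */
}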
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h
index fc5d61ae9f0b..a08100e59c69 100644
--- a/include/asm-sparc/page.h
+++ b/include/asm-sparc/page.h
@@ -24,7 +24,7 @@
#ifdef __KERNEL__
-#include <asm/head.h> /* for KERNBASE */
+/* #include <asm/head.h> XXX */ /* for KERNBASE */
#include <asm/btfixup.h>
#ifndef __ASSEMBLY__
@@ -89,7 +89,7 @@ extern struct cache_palias *sparc_aliases;
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pmdv[16]; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long ctxd; } ctxd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
@@ -97,7 +97,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
-#define pmd_val(x) ((x).pmd)
+#define pmd_val(x) ((x).pmdv[0])
#define pgd_val(x) ((x).pgd)
#define ctxd_val(x) ((x).ctxd)
#define pgprot_val(x) ((x).pgprot)
@@ -105,7 +105,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define __pte(x) ((pte_t) { (x) } )
#define __iopte(x) ((iopte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
+/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
#define __pgd(x) ((pgd_t) { (x) } )
#define __ctxd(x) ((ctxd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
@@ -117,7 +117,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
*/
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
-typedef unsigned long pmd_t;
+typedef struct { unsigned long pmdv[16]; } pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long ctxd_t;
typedef unsigned long pgprot_t;
@@ -125,7 +125,7 @@ typedef unsigned long iopgprot_t;
#define pte_val(x) (x)
#define iopte_val(x) (x)
-#define pmd_val(x) (x)
+#define pmd_val(x) ((x).pmdv[0])
#define pgd_val(x) (x)
#define ctxd_val(x) (x)
#define pgprot_val(x) (x)
@@ -133,7 +133,7 @@ typedef unsigned long iopgprot_t;
#define __pte(x) (x)
#define __iopte(x) (x)
-#define __pmd(x) (x)
+/* #define __pmd(x) (x) */ /* XXX later */
#define __pgd(x) (x)
#define __ctxd(x) (x)
#define __pgprot(x) (x)
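
The two __pmd() sites above are stubbed out with an "XXX procedure with loop" note; a hedged guess at the shape such a helper could take. Broadcasting the value to all 16 words is an assumption here — it is only obviously right for the zero (pmd_clear-style) case — and is not the commit's eventual implementation.

static inline pmd_t __pmd_sketch(unsigned long val)
{
	pmd_t pmd;
	int i;

	/* Fill all 16 words; correct for val == 0, assumed otherwise. */
	for (i = 0; i < 16; i++)
		pmd.pmdv[i] = val;
	return pmd;
}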
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index 4a8b93251dcf..09c9decddbc4 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -40,13 +40,8 @@ BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- return 0;
-}
-
-BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one_fast, struct mm_struct *, unsigned long)
-#define pmd_alloc_one_fast(mm, address) BTFIXUP_CALL(pmd_alloc_one_fast)(mm, address)
+BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
+#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
@@ -65,8 +60,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
-#define free_pte_fast(pte) BTFIXUP_CALL(free_pte_fast)(pte)
-#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free_kernel(pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, struct page *)
#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 17f6ed0984f5..527611357a6d 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -11,9 +11,7 @@
#include <linux/config.h>
#include <linux/spinlock.h>
-/* XXX This creates many nasty warnings. */
-/* #include <linux/highmem.h> */ /* kmap_atomic in pte_offset_map */
-#include <asm/asi.h>
+/* #include <asm/asi.h> */ /* doesn't seem like being used XXX */
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
@@ -32,10 +30,6 @@ struct vm_area_struct;
extern void load_mmu(void);
extern unsigned long calc_highpages(void);
-
-BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)
-
-#define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)
/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
@@ -189,14 +183,8 @@ extern unsigned long empty_zero_page;
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-#define SIZEOF_PTR_LOG2 2
-
+/*
+ */
BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
@@ -340,20 +328,15 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
-/* __pte_offset is not BTFIXUP-ed, but PTRS_PER_PTE is, so it's ok. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#if 0 /* XXX Should we expose pmd_page_kernel? */
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(addr))
-#endif
-#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + __pte_offset(addr))
-#define pte_offset_map_nested(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + __pte_offset(addr))
+/*
+ * This shortcut works on sun4m (and sun4d) because the nocache area is static,
+ * and sun4c is guaranteed to have no highmem anyway.
+ */
+#define pte_offset_map(d, a) pte_offset_kernel(d,a)
+#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte) do{}while(0)
+#define pte_unmap_nested(pte) do{}while(0)
/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;
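
The removed __pte_offset() macro is the index math that pte_offset_kernel() still performs behind the BTFIXUP call; a worked example, assuming 4 KB pages and the 1024-entry software PTE table implied by the 16-word pmd_t:

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024	/* 16 hw tables x 64 entries, one 4 KB page */

static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
/* e.g. pte_index(0x00403000) == 3; the index wraps every 4 MB of VA. */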
diff --git a/include/asm-sparc/pgtsrmmu.h b/include/asm-sparc/pgtsrmmu.h
index 895a6fd07b67..ad924985d6af 100644
--- a/include/asm-sparc/pgtsrmmu.h
+++ b/include/asm-sparc/pgtsrmmu.h
@@ -10,11 +10,11 @@
#include <asm/page.h>
#include <asm/thread_info.h> /* TI_UWINMASK for WINDOW_FLUSH */
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
+/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_PMD_SHIFT 18
#define SRMMU_PMD_SIZE (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK (~(SRMMU_PMD_SIZE-1))
-#define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)
+/* #define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK) */
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT 24
diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h
index dd486a2d3932..e9f7d10e97d0 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -7,21 +7,25 @@
#ifndef __SPARC_SOFTIRQ_H
#define __SPARC_SOFTIRQ_H
-#include <linux/threads.h> /* For NR_CPUS */
+// #include <linux/threads.h> /* For NR_CPUS */
-#include <asm/atomic.h>
+// #include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
-#define local_bh_disable() (local_bh_count(smp_processor_id())++)
-#define __local_bh_enable() (local_bh_count(smp_processor_id())--)
+#define local_bh_disable() \
+ do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+ do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
+
#define local_bh_enable() \
-do { if (!--local_bh_count(smp_processor_id()) && \
+do { \
+ __local_bh_enable(); \
+ if (!in_interrupt() && \
softirq_pending(smp_processor_id())) { \
do_softirq(); \
- local_irq_enable(); \
} \
+ preempt_check_resched(); \
} while (0)
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#endif /* __SPARC_SOFTIRQ_H */
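
A userspace sketch of the nesting semantics the new macros give local_bh_disable()/local_bh_enable(); plain globals stand in for preempt_count() and the softirq machinery, so names and structure here are assumptions:

#include <stdio.h>

#define SOFTIRQ_OFFSET	(1UL << 8)

static unsigned long preempt_count;
static int pending = 1;

static void do_softirq(void) { pending = 0; puts("softirq ran"); }

static void bh_disable(void) { preempt_count += SOFTIRQ_OFFSET; }

static void bh_enable(void)
{
	preempt_count -= SOFTIRQ_OFFSET;
	/* Only the outermost enable, outside any irq context, runs softirqs. */
	if (preempt_count == 0 && pending)
		do_softirq();
}

int main(void)
{
	bh_disable();
	bh_disable();	/* nested disable: still protected */
	bh_enable();	/* inner enable: nothing runs */
	bh_enable();	/* outermost enable: pending softirq runs here */
	return 0;
}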
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index fa904f4b5d3a..b9599127fad1 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -104,7 +104,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
* XXX prepare_arch_switch() is much smarter than this in sparc64, are we sure?
* XXX Consider if doing it the flush_user_windows way is faster (by uwinmask).
*/
-#define prepare_arch_switch(rq) do { \
+#define prepare_arch_switch(rq, next) do { \
__asm__ __volatile__( \
".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
@@ -112,7 +112,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
"save %sp, -0x40, %sp\n\t" \
"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
-#define finish_arch_switch(rq) do{ }while(0)
+#define finish_arch_switch(rq, next) do{ }while(0)
/* Much care has gone into this code, do not touch it.
*