author    Linus Torvalds <torvalds@penguin.transmeta.com>  2002-10-03 21:57:30 -0700
committer Linus Torvalds <torvalds@penguin.transmeta.com>  2002-10-03 21:57:30 -0700
commit    b3819ec5b145e04795833505e61eb764b508eee6 (patch)
tree      a403c800689f297edf670fb46eb38881337c36fb /include
parent    7ac4f06b2f6626ab135faa9ef61624d47a4f6718 (diff)
parent    5cc974f558b45f35422eb53f1f8f56808fa69114 (diff)
Merge s390 update into current tree
Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/bitops.h | 10
-rw-r--r--  include/asm-s390/checksum.h | 40
-rw-r--r--  include/asm-s390/dasd.h | 4
-rw-r--r--  include/asm-s390/debug.h | 6
-rw-r--r--  include/asm-s390/hardirq.h | 111
-rw-r--r--  include/asm-s390/irq.h | 89
-rw-r--r--  include/asm-s390/kmap_types.h | 21
-rw-r--r--  include/asm-s390/lowcore.h | 48
-rw-r--r--  include/asm-s390/param.h | 10
-rw-r--r--  include/asm-s390/pgalloc.h | 2
-rw-r--r--  include/asm-s390/pgtable.h | 2
-rw-r--r--  include/asm-s390/processor.h | 22
-rw-r--r--  include/asm-s390/ptrace.h | 35
-rw-r--r--  include/asm-s390/rwsem.h | 68
-rw-r--r--  include/asm-s390/s390io.h | 33
-rw-r--r--  include/asm-s390/smp.h | 17
-rw-r--r--  include/asm-s390/softirq.h | 31
-rw-r--r--  include/asm-s390/spinlock.h | 2
-rw-r--r--  include/asm-s390/system.h | 294
-rw-r--r--  include/asm-s390/thread_info.h | 6
-rw-r--r--  include/asm-s390/tlbflush.h | 3
-rw-r--r--  include/asm-s390/unistd.h | 10
-rw-r--r--  include/asm-s390x/bitops.h | 21
-rw-r--r--  include/asm-s390x/checksum.h | 38
-rw-r--r--  include/asm-s390x/dasd.h | 4
-rw-r--r--  include/asm-s390x/debug.h | 6
-rw-r--r--  include/asm-s390x/hardirq.h | 108
-rw-r--r--  include/asm-s390x/irq.h | 89
-rw-r--r--  include/asm-s390x/kmap_types.h | 21
-rw-r--r--  include/asm-s390x/lowcore.h | 47
-rw-r--r--  include/asm-s390x/param.h | 13
-rw-r--r--  include/asm-s390x/pgalloc.h | 2
-rw-r--r--  include/asm-s390x/pgtable.h | 2
-rw-r--r--  include/asm-s390x/processor.h | 22
-rw-r--r--  include/asm-s390x/ptrace.h | 33
-rw-r--r--  include/asm-s390x/rwsem.h | 68
-rw-r--r--  include/asm-s390x/s390io.h | 33
-rw-r--r--  include/asm-s390x/setup.h | 9
-rw-r--r--  include/asm-s390x/smp.h | 17
-rw-r--r--  include/asm-s390x/softirq.h | 31
-rw-r--r--  include/asm-s390x/spinlock.h | 41
-rw-r--r--  include/asm-s390x/system.h | 306
-rw-r--r--  include/asm-s390x/thread_info.h | 6
-rw-r--r--  include/asm-s390x/tlbflush.h | 3
-rw-r--r--  include/asm-s390x/unistd.h | 10
45 files changed, 1056 insertions(+), 738 deletions(-)
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 29f287dd9f87..b848a9dd86af 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -59,8 +59,8 @@ static inline void set_bit_cs(int nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
+ addr ^= addr & 3; /* align address to 4 */
#endif
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make OR mask */
@@ -84,8 +84,8 @@ static inline void clear_bit_cs(int nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
+ addr ^= addr & 3; /* align address to 4 */
#endif
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 31)); /* make AND mask */
@@ -109,8 +109,8 @@ static inline void change_bit_cs(int nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
+ addr ^= addr & 3; /* align address to 4 */
#endif
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make XOR mask */
@@ -160,8 +160,8 @@ static inline int test_and_clear_bit_cs(int nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
+ addr ^= addr & 3; /* align address to 4 */
#endif
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 31)); /* make AND mask */
@@ -186,8 +186,8 @@ static inline int test_and_change_bit_cs(int nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
+ addr ^= addr & 3; /* align address to 4 */
#endif
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make XOR mask */
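
[annotation] The swapped lines throughout this file fix an ordering bug: the old code aligned the pointer (clearing its low two bits) before folding those same bits into the bit number, so for a misaligned pointer the offset was lost and the CS loop addressed the wrong bit. A minimal sketch of the corrected arithmetic for the 32-bit asm-s390 variant (helper and variable names are hypothetical; the asm-s390x version is identical with 8-byte words):

#include <assert.h>
#include <stdint.h>

/* Fold a misaligned byte address into an aligned word address plus a
 * corrected bit number, as the fixed set_bit_cs() does for 4-byte words. */
static void align_bit_args(uintptr_t *addr, int *nr)
{
	*nr += (*addr & 3) << 3;	/* first: fold misalignment into bit number */
	*addr ^= *addr & 3;		/* then: align the address to 4 bytes */
}

int main(void)
{
	uintptr_t addr = 0x1002;	/* pointer misaligned by 2 bytes */
	int nr = 5;

	align_bit_args(&addr, &nr);
	assert(addr == 0x1000 && nr == 21);

	/* In the old order, addr's low bits were already zero by the time
	 * nr was adjusted, so nr stayed 5 and the wrong bit was changed. */
	return 0;
}
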
diff --git a/include/asm-s390/checksum.h b/include/asm-s390/checksum.h
index f2621064c8dc..fb53233f0399 100644
--- a/include/asm-s390/checksum.h
+++ b/include/asm-s390/checksum.h
@@ -27,13 +27,27 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int
-csum_partial(const unsigned char * buff, int len, unsigned int sum);
+static inline unsigned int
+csum_partial(const unsigned char * buff, int len, unsigned int sum)
+{
+ register_pair rp;
+ /*
+ * Experiments with ethernet and slip connections show that buff
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
+ rp.subreg.even = (unsigned long) buff;
+ rp.subreg.odd = (unsigned long) len;
+ __asm__ __volatile__ (
+ "0: cksm %0,%1\n" /* do checksum on longs */
+ " jo 0b\n"
+ : "+&d" (sum), "+&a" (rp) : : "cc" );
+ return sum;
+}
/*
* csum_partial as an inline function
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
register_pair rp;
@@ -55,7 +69,7 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
* better 64-bit) boundary
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
{
memcpy(dst,src,len);
@@ -71,7 +85,7 @@ csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy_from_user (const char *src, char *dst,
int len, unsigned int sum,
int *err_ptr)
@@ -88,7 +102,7 @@ csum_partial_copy_from_user (const char *src, char *dst,
}
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
memcpy(dst,src,len);
@@ -98,10 +112,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum
/*
* Fold a partial checksum without adding pseudo headers
*/
-#if 1
-unsigned short csum_fold(unsigned int sum);
-#else
-extern inline unsigned short
+static inline unsigned short
csum_fold(unsigned int sum)
{
register_pair rp;
@@ -116,14 +127,13 @@ csum_fold(unsigned int sum)
: "+&d" (sum), "=d" (rp) : : "cc" );
return ((unsigned short) ~sum);
}
-#endif
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
*/
-extern inline unsigned short
+static inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
register_pair rp;
@@ -143,7 +153,7 @@ ip_fast_csum(unsigned char *iph, unsigned int ihl)
* computes the checksum of the TCP/UDP pseudo-header
* returns a 32-bit checksum
*/
-extern inline unsigned int
+static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
@@ -176,7 +186,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
* returns a 16-bit checksum, already complemented
*/
-extern inline unsigned short int
+static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
@@ -189,7 +199,7 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
* in icmp.c
*/
-extern inline unsigned short
+static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
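
[annotation] With the #if 1 block gone, csum_fold() is always the inline register-pair version. For reference, a portable sketch of what csum_fold computes: fold a 32-bit one's-complement accumulator to 16 bits with end-around carry, then complement. This is the generic algorithm, not the s390 implementation above:

#include <stdint.h>

/* Generic model of csum_fold(): reduce a 32-bit partial checksum to
 * 16 bits, absorbing any carry, then return the one's complement. */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* add the two halves */
	sum += sum >> 16;			/* absorb a possible carry */
	return (uint16_t)~sum;
}

ip_compute_csum() above is then simply fold_csum applied to the csum_partial accumulator.
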
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index 1ea18f182184..d536b229bfc9 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -13,6 +13,8 @@
* 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2)
* 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD (and BIODASDENAPAV) IOCTL
* 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL
+ * ##/##/## DASD_API_VERSION 5 - added boxed dasd support TOBEDONE
+ * 21/06/02 DASD_API_VERSION 6 - fixed HDIO_GETGEO: geo.start is in sectors!
*
*/
@@ -22,7 +24,7 @@
#define DASD_IOCTL_LETTER 'D'
-#define DASD_API_VERSION 4
+#define DASD_API_VERSION 6
/*
* struct dasd_information2_t
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h
index e46698ba43b1..56a4043a1155 100644
--- a/include/asm-s390/debug.h
+++ b/include/asm-s390/debug.h
@@ -160,7 +160,8 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
}
extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...);
+debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+ __attribute__ ((format(printf, 3, 4)));
extern inline debug_entry_t*
@@ -195,7 +196,8 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...);
+debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+ __attribute__ ((format(printf, 3, 4)));
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
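
[annotation] The added format(printf, 3, 4) attribute tells gcc that parameter 3 is a printf-style format string matched by the variadic arguments starting at parameter 4, so bad format/argument pairs in callers of debug_sprintf_event() become compile-time warnings. A self-contained illustration with a stand-in function (my_event is hypothetical):

#include <stdarg.h>
#include <stdio.h>

/* Same annotation as debug_sprintf_event(): format string is parameter 3,
 * the matching variadic arguments start at parameter 4. */
static void my_event(void *id, int level, const char *fmt, ...)
	__attribute__ ((format(printf, 3, 4)));

static void my_event(void *id, int level, const char *fmt, ...)
{
	va_list ap;

	(void)id; (void)level;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	my_event(0, 2, "irq %d pending\n", 42);	/* type-checked: ok */
	/* my_event(0, 2, "irq %d pending\n");	   would warn: missing int */
	return 0;
}
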
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index d7b47844d7c2..7f38c6f01e05 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -14,15 +14,13 @@
#include <linux/config.h>
#include <linux/threads.h>
-#include <asm/lowcore.h>
#include <linux/sched.h>
#include <linux/cache.h>
+#include <asm/lowcore.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
} ____cacheline_aligned irq_cpustat_t;
@@ -30,64 +28,81 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00010000
*/
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-
-#ifndef CONFIG_SMP
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 1
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
-
-#define hardirq_enter(cpu) (local_irq_count(cpu)++)
-#define hardirq_exit(cpu) (local_irq_count(cpu)--)
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define synchronize_irq() do { } while (0)
+#define __MASK(x) ((1UL << (x))-1)
-#else /* CONFIG_SMP */
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#include <asm/atomic.h>
-#include <asm/smp.h>
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-extern atomic_t global_irq_holder;
-extern atomic_t global_irq_lock;
-extern atomic_t global_irq_count;
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
-static inline void release_irqlock(int cpu)
-{
- /* if we didn't own the irq lock, just ignore.. */
- if (atomic_read(&global_irq_holder) == cpu) {
- atomic_set(&global_irq_holder,NO_PROC_ID);
- atomic_set(&global_irq_lock,0);
- }
-}
-static inline void hardirq_enter(int cpu)
-{
- ++local_irq_count(cpu);
- atomic_inc(&global_irq_count);
-}
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
-static inline void hardirq_exit(int cpu)
-{
- atomic_dec(&global_irq_count);
- --local_irq_count(cpu);
-}
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
-static inline int hardirq_trylock(int cpu)
-{
- return !atomic_read(&global_irq_count) &&
- !atomic_read(&global_irq_lock);
-}
+extern void do_call_softirq(void);
-#define hardirq_endlock(cpu) do { } while (0)
+#if CONFIG_PREEMPT
+# define in_atomic() (in_interrupt() || preempt_count() == PREEMPT_ACTIVE)
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define in_atomic() (preempt_count() != 0)
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
-extern void synchronize_irq(void);
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ /* Use the async. stack for softirq */ \
+ do_call_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
+
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq) barrier()
+#else
+ extern void synchronize_irq(unsigned int irq);
+#endif /* CONFIG_SMP */
-#endif /* CONFIG_SMP */
+extern void show_stack(unsigned long * esp);
#endif /* __ASM_HARDIRQ_H */
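
[annotation] This drops the global irq lock and the per-CPU local_irq_count/local_bh_count in favor of counters multiplexed into the per-thread preempt_count word: irq_enter() adds HARDIRQ_OFFSET, local_bh_disable() adds SOFTIRQ_OFFSET, and the in_*() predicates are plain mask tests. A self-contained model of the encoding, with an ordinary variable standing in for preempt_count():

#include <assert.h>

#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	16

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

#define PREEMPT_MASK	0x000000ffUL
#define SOFTIRQ_MASK	0x0000ff00UL
#define HARDIRQ_MASK	0x00010000UL	/* HARDIRQ_BITS == 1 on s390 */

static unsigned long preempt_count;	/* stand-in for the thread field */

int main(void)
{
	preempt_count += HARDIRQ_OFFSET;		/* irq_enter() */
	assert(preempt_count & HARDIRQ_MASK);		/* in_irq() */

	preempt_count += SOFTIRQ_OFFSET;		/* local_bh_disable() */
	assert(preempt_count & SOFTIRQ_MASK);		/* in_softirq() */
	assert(preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK)); /* in_interrupt() */

	preempt_count -= SOFTIRQ_OFFSET + HARDIRQ_OFFSET;
	assert(preempt_count == 0);			/* preemptable again */
	return 0;
}
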
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
index 8ecfe3324a44..bcc71eccbc63 100644
--- a/include/asm-s390/irq.h
+++ b/include/asm-s390/irq.h
@@ -637,6 +637,12 @@ int s390_request_irq_special( int irq,
const char *devname,
void *dev_id);
+extern int s390_request_console_irq (int irq,
+ void (*handler) (int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+
extern int set_cons_dev(int irq);
extern int wait_cons_dev(int irq);
extern schib_t *s390_get_schib( int irq );
@@ -860,28 +866,8 @@ typedef struct {
__u32 vrdccrft : 8; /* real device feature (output) */
} __attribute__ ((packed,aligned(4))) diag210_t;
-void VM_virtual_device_info( __u16 devno, /* device number */
- senseid_t *ps ); /* ptr to senseID data */
+extern int diag210( diag210_t * addr);
-extern __inline__ int diag210( diag210_t * addr)
-{
- int ccode;
-
- __asm__ __volatile__(
-#ifdef CONFIG_ARCH_S390X
- " sam31\n"
- " diag %1,0,0x210\n"
- " sam64\n"
-#else
- " diag %1,0,0x210\n"
-#endif
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "a" (addr)
- : "cc" );
- return ccode;
-}
extern __inline__ int chsc( chsc_area_t * chsc_area)
{
int cc;
@@ -897,67 +883,6 @@ extern __inline__ int chsc( chsc_area_t * chsc_area)
return cc;
}
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-#ifdef CONFIG_SMP
-
-#include <asm/atomic.h>
-
-static inline void irq_enter(int cpu, unsigned int irq)
-{
- hardirq_enter(cpu);
- while (atomic_read(&global_irq_lock) != 0) {
- eieio();
- }
-}
-
-static inline void irq_exit(int cpu, unsigned int irq)
-{
- hardirq_exit(cpu);
- release_irqlock(cpu);
-}
-
-
-#else
-
-#define irq_enter(cpu, irq) (++local_irq_count(cpu))
-#define irq_exit(cpu, irq) (--local_irq_count(cpu))
-
-#endif
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-/*
- * x86 profiling function, SMP safe. We might want to do this in
- * assembly totally?
- * is this ever used anyway?
- */
-extern char _stext;
-static inline void s390_do_profile (unsigned long addr)
-{
- if (prof_buffer && current->pid) {
-#ifndef CONFIG_ARCH_S390X
- addr &= 0x7fffffff;
-#endif
- addr -= (unsigned long) &_stext;
- addr >>= prof_shift;
- /*
- * Don't ignore out-of-bounds EIP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (addr > prof_len-1)
- addr = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[addr]);
- }
-}
-
#include <asm/s390io.h>
#define get_irq_lock(irq) &ioinfo[irq]->irq_lock
diff --git a/include/asm-s390/kmap_types.h b/include/asm-s390/kmap_types.h
new file mode 100644
index 000000000000..27f3d6c49ad5
--- /dev/null
+++ b/include/asm-s390/kmap_types.h
@@ -0,0 +1,21 @@
+#ifdef __KERNEL__
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_TYPE_NR
+};
+
+#endif
+#endif /* __KERNEL__ */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index d5ad56f99aa4..d33376528713 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -52,41 +52,12 @@
#define __LC_PFAULT_INTPARM 0x080
-/* interrupt handler start with all io, external and mcck interrupt disabled */
-
-#define _RESTART_PSW_MASK 0x00080000
-#define _EXT_PSW_MASK 0x04080000
-#define _PGM_PSW_MASK 0x04080000
-#define _SVC_PSW_MASK 0x04080000
-#define _MCCK_PSW_MASK 0x04080000
-#define _IO_PSW_MASK 0x04080000
-#define _USER_PSW_MASK 0x070DC000/* DAT, IO, EXT, Home-space */
-#define _WAIT_PSW_MASK 0x070E0000/* DAT, IO, EXT, Wait, Home-space */
-#define _DW_PSW_MASK 0x000A0000/* disabled wait PSW mask */
-
-#define _PRIMARY_MASK 0x0000 /* MASK for SACF */
-#define _SECONDARY_MASK 0x0100 /* MASK for SACF */
-#define _ACCESS_MASK 0x0200 /* MASK for SACF */
-#define _HOME_MASK 0x0300 /* MASK for SACF */
-
-#define _PSW_PRIM_SPACE_MODE 0x00000000
-#define _PSW_SEC_SPACE_MODE 0x00008000
-#define _PSW_ACC_REG_MODE 0x00004000
-#define _PSW_HOME_SPACE_MODE 0x0000C000
-
-#define _PSW_WAIT_MASK_BIT 0x00020000 /* Wait bit */
-#define _PSW_IO_MASK_BIT 0x02000000 /* IO bit */
-#define _PSW_IO_WAIT 0x02020000 /* IO & Wait bit */
-
-/* we run in 31 Bit mode */
-#define _ADDR_31 0x80000000
-
#ifndef __ASSEMBLY__
#include <linux/config.h>
-#include <asm/processor.h>
#include <linux/types.h>
#include <asm/atomic.h>
+#include <asm/processor.h>
#include <asm/sigp.h>
void restart_int_handler(void);
@@ -176,25 +147,16 @@ struct _lowcore
__u8 pad12[0x1000-0xe04]; /* 0xe04 */
} __attribute__((packed)); /* End structure*/
+#define S390_lowcore (*((struct _lowcore *) 0))
+extern struct _lowcore *lowcore_ptr[];
+
extern __inline__ void set_prefix(__u32 address)
{
__asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" );
}
-#define S390_lowcore (*((struct _lowcore *) 0))
-extern struct _lowcore *lowcore_ptr[];
-
-#ifndef CONFIG_SMP
-#define get_cpu_lowcore(cpu) (&S390_lowcore)
-#define safe_get_cpu_lowcore(cpu) (&S390_lowcore)
-#else
-#define get_cpu_lowcore(cpu) (lowcore_ptr[(cpu)])
-#define safe_get_cpu_lowcore(cpu) \
- ((cpu) == smp_processor_id() ? &S390_lowcore : lowcore_ptr[(cpu)])
-#endif
-#endif /* __ASSEMBLY__ */
-
#define __PANIC_MAGIC 0xDEADC0DE
#endif
+#endif
diff --git a/include/asm-s390/param.h b/include/asm-s390/param.h
index 4c47f9f048f2..753b8bdeecba 100644
--- a/include/asm-s390/param.h
+++ b/include/asm-s390/param.h
@@ -9,6 +9,12 @@
#ifndef _ASMS390_PARAM_H
#define _ASMS390_PARAM_H
+#ifdef __KERNEL__
+# define HZ 100 /* Internal kernel timer frequency */
+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+#endif
+
#ifndef HZ
#define HZ 100
#endif
@@ -25,8 +31,4 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
-#ifdef __KERNEL__
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
-#endif
-
#endif
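
[annotation] The kernel-internal tick rate (HZ) and the rate user-visible "ticks" are reported in (USER_HZ) become separate concepts, even though both are 100 on s390; times() and similar interfaces report in USER_HZ. A sketch of the conversion this separation enables, assuming HZ is an integer multiple of USER_HZ:

#include <assert.h>

#define HZ	100	/* internal kernel timer frequency */
#define USER_HZ	100	/* rate user-visible "ticks" are reported in */

/* What a jiffies -> clock_t conversion looks like once the two rates
 * are distinct (they happen to be equal here, so it is the identity). */
static long jiffies_to_clock_t(unsigned long j)
{
	return j / (HZ / USER_HZ);
}

int main(void)
{
	assert(jiffies_to_clock_t(250) == 250);	/* 1:1 while HZ == USER_HZ */
	return 0;
}
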
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 9ef3dd733514..5a216e91891b 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -16,6 +16,8 @@
#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
#define check_pgt_cache() do {} while (0)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index b48ab817e095..b11c0deb09c4 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -176,6 +176,8 @@ extern char empty_zero_page[PAGE_SIZE];
#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000|0x100)
#define _KERNSEG_TABLE (_KERNEL_SEG_TABLE_LEN)
+#define USER_STD_MASK 0x00000080UL
+
/*
* No mapping available
*/
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 542f7f1e3150..31bbde234ee9 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -101,8 +101,8 @@ typedef struct thread_struct thread_struct;
/* need to define ... */
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = _USER_PSW_MASK; \
- regs->psw.addr = new_psw | 0x80000000; \
+ regs->psw.mask = PSW_USER_BITS; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE31; \
regs->gprs[15] = new_stackp ; \
} while (0)
@@ -137,19 +137,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-/* Only let our hackers near the condition codes */
-#define PSW_MASK_DEBUGCHANGE 0x00003000UL
-/* Don't let em near the addressing mode either */
-#define PSW_ADDR_DEBUGCHANGE 0x7FFFFFFFUL
-#define PSW_ADDR_MASK 0x7FFFFFFFUL
-/* Program event recording mask */
-#define PSW_PER_MASK 0x40000000UL
-#define USER_STD_MASK 0x00000080UL
-#define PSW_PROBLEM_STATE 0x00010000UL
-
-/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
@@ -178,7 +165,8 @@ static inline void enabled_wait(void)
unsigned long reg;
psw_t wait_psw;
- wait_psw.mask = 0x070e0000;
+ wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
+ PSW_MASK_MCHECK | PSW_MASK_WAIT;
asm volatile (
" basr %0,0\n"
"0: la %0,1f-0b(%0)\n"
@@ -200,7 +188,7 @@ static inline void disabled_wait(unsigned long code)
psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
& -sizeof(psw_t));
- dw_psw->mask = 0x000a0000;
+ dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT;
dw_psw->addr = code;
/*
* Store status and then load disabled wait psw,
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index edc3b5b5c1f2..5f64a65843f7 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -114,7 +114,6 @@
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/types.h>
-
#include <asm/setup.h>
/* this typedef defines how a Program Status Word looks like */
@@ -124,10 +123,32 @@ typedef struct
__u32 addr;
} __attribute__ ((aligned(8))) psw_t;
-#ifdef __KERNEL__
-#define FIX_PSW(addr) ((unsigned long)(addr)|0x80000000UL)
-#define ADDR_BITS_REMOVE(addr) ((addr)&0x7fffffff)
-#endif
+#define PSW_MASK_PER 0x40000000UL
+#define PSW_MASK_DAT 0x04000000UL
+#define PSW_MASK_IO 0x02000000UL
+#define PSW_MASK_EXT 0x01000000UL
+#define PSW_MASK_KEY 0x00F00000UL
+#define PSW_MASK_MCHECK 0x00040000UL
+#define PSW_MASK_WAIT 0x00020000UL
+#define PSW_MASK_PSTATE 0x00010000UL
+#define PSW_MASK_ASC 0x0000C000UL
+#define PSW_MASK_CC 0x00003000UL
+#define PSW_MASK_PM 0x00000F00UL
+
+#define PSW_ADDR_AMODE31 0x80000000UL
+#define PSW_ADDR_INSN 0x7FFFFFFFUL
+
+#define PSW_BASE_BITS 0x00080000UL
+
+#define PSW_ASC_PRIMARY 0x00000000UL
+#define PSW_ASC_ACCREG 0x00004000UL
+#define PSW_ASC_SECONDARY 0x00008000UL
+#define PSW_ASC_HOME 0x0000C000UL
+
+#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY)
+#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE)
typedef union
{
@@ -328,8 +349,8 @@ struct user_regs_struct
};
#ifdef __KERNEL__
-#define user_mode(regs) (((regs)->psw.mask & PSW_PROBLEM_STATE) != 0)
-#define instruction_pointer(regs) ((regs)->psw.addr)
+#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
+#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
extern void show_regs(struct pt_regs * regs);
#endif
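
[annotation] The named PSW bits replace the magic constants deleted from lowcore.h and processor.h; in particular PSW_USER_BITS composes exactly the old _USER_PSW_MASK value 0x070DC000 (DAT on, I/O, external and machine-check interrupts enabled, home address space, problem state). A quick consistency check:

#include <assert.h>

#define PSW_MASK_DAT	0x04000000UL
#define PSW_MASK_IO	0x02000000UL
#define PSW_MASK_EXT	0x01000000UL
#define PSW_MASK_MCHECK	0x00040000UL
#define PSW_MASK_PSTATE	0x00010000UL
#define PSW_BASE_BITS	0x00080000UL
#define PSW_ASC_HOME	0x0000C000UL

#define PSW_USER_BITS	(PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
			 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
			 PSW_MASK_PSTATE)

int main(void)
{
	/* matches the _USER_PSW_MASK removed from asm-s390/lowcore.h */
	assert(PSW_USER_BITS == 0x070DC000UL);
	return 0;
}
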
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 1072694713be..4345d83de401 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -48,9 +48,11 @@
struct rwsem_waiter;
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
/*
* the semaphore definition
@@ -105,6 +107,27 @@ static inline void __down_read(struct rw_semaphore *sem)
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ __asm__ __volatile__(
+ " l %0,0(%2)\n"
+ "0: ltr %1,%0\n"
+ " jm 1f\n"
+ " ahi %1,%3\n"
+ " cs %0,%1,0(%2)\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old), "=&d" (new)
+ : "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "cc", "memory" );
+ return old >= 0 ? 1 : 0;
+}
+
+/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
@@ -126,6 +149,26 @@ static inline void __down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ signed long old;
+
+ __asm__ __volatile__(
+ " l %0,0(%1)\n"
+ "0: ltr %0,%0\n"
+ " jnz 1f\n"
+ " cs %0,%2,0(%1)\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old)
+ : "a" (&sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "cc", "memory" );
+ return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
+}
+
+/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
@@ -169,6 +212,27 @@ static inline void __up_write(struct rw_semaphore *sem)
}
/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ signed long old, new, tmp;
+
+ tmp = -RWSEM_WAITING_BIAS;
+ __asm__ __volatile__(
+ " l %0,0(%2)\n"
+ "0: lr %1,%0\n"
+ " a %1,%3\n"
+ " cs %0,%1,0(%2)\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new)
+ : "a" (&sem->count), "m" (tmp)
+ : "cc", "memory" );
+ if (new > 1) // FIXME: is this correct ?!?
+ rwsem_downgrade_wake(sem);
+}
+
+/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
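
[annotation] Both new trylocks follow the same compare-and-swap pattern as the asm: snapshot the count, give up if it already shows a conflicting holder, otherwise install the biased value and retry if another CPU raced in between. A portable sketch of __down_read_trylock() in C11 atomics; the bias constant is the conventional generic-rwsem value and an assumption here, since the defines sit outside this hunk:

#include <stdatomic.h>

#define RWSEM_ACTIVE_READ_BIAS	1L	/* assumed generic-rwsem value */

struct rw_semaphore { _Atomic long count; };

/* Model of __down_read_trylock(): succeed only while the count is
 * non-negative, i.e. no active or waiting writer. */
static int down_read_trylock(struct rw_semaphore *sem)
{
	long old = atomic_load(&sem->count);

	while (old >= 0) {
		/* cs loop: retry if another CPU changed the count under us */
		if (atomic_compare_exchange_weak(&sem->count, &old,
						 old + RWSEM_ACTIVE_READ_BIAS))
			return 1;	/* read lock acquired */
	}
	return 0;			/* writer active or queued: contention */
}

int main(void)
{
	struct rw_semaphore sem = { 0 };	/* unlocked */
	return down_read_trylock(&sem) ? 0 : 1;	/* uncontended: succeeds */
}
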
diff --git a/include/asm-s390/s390io.h b/include/asm-s390/s390io.h
index 95f9c5b61973..8424a470e887 100644
--- a/include/asm-s390/s390io.h
+++ b/include/asm-s390/s390io.h
@@ -9,22 +9,25 @@
#ifndef __s390io_h
#define __s390io_h
+#include <linux/device.h>
+
/*
* IRQ data structure used by I/O subroutines
*
* Note : If bit flags are added, the "unused" value must be
* decremented accordingly !
*/
-typedef struct _ioinfo {
+typedef struct subchannel {
unsigned int irq; /* aka. subchannel number */
spinlock_t irq_lock; /* irq lock */
- void *private_data; /* pointer to private data */
-
- struct _ioinfo *prev;
- struct _ioinfo *next;
__u8 st; /* subchannel type */
+ void *private_data; /* pointer to private data */
+
+ struct subchannel *prev;
+ struct subchannel *next;
+
union {
unsigned int info;
struct {
@@ -78,8 +81,26 @@ typedef struct _ioinfo {
unsigned long qflag; /* queued flags */
__u8 qlpm; /* queued logical path mask */
ssd_info_t ssd_info; /* subchannel description */
+ struct device dev; /* entry in device tree */
+} __attribute__ ((aligned(8))) ioinfo_t;
+
- } __attribute__ ((aligned(8))) ioinfo_t;
+/*
+ * There are four different subchannel types, but we are currently
+ * only interested in I/O subchannels. This means there is only
+ * one subchannel_driver, other subchannels belonging to css_bus_type
+ * are simply ignored.
+ */
+struct subchannel_driver {
+ enum {
+ SUBCHANNEL_TYPE_IO = 0,
+ SUBCHANNEL_TYPE_CHSC = 1,
+ SUBCHANNEL_TYPE_MESSAGE = 2,
+ SUBCHANNEL_TYPE_ADM = 3,
+ } st; /* subchannel type */
+ struct device_driver drv; /* entry in driver tree */
+};
+extern struct bus_type css_bus_type;
#define IOINFO_FLAGS_BUSY 0x80000000
#define IOINFO_FLAGS_OPER 0x40000000
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 8caa0b8bdd87..7bd666e535ae 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -11,7 +11,7 @@
#include <linux/config.h>
#include <linux/threads.h>
-#include <linux/ptrace.h>
+#include <linux/bitops.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
@@ -29,6 +29,7 @@ typedef struct
} sigp_info;
extern volatile unsigned long cpu_online_map;
+extern unsigned long cpu_possible_map;
#define NO_PROC_ID 0xFF /* No processor magic marker */
@@ -46,14 +47,20 @@ extern volatile unsigned long cpu_online_map;
#define smp_processor_id() (current_thread_info()->cpu)
-extern __inline__ int cpu_logical_map(int cpu)
+#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu)))
+
+extern inline unsigned int num_online_cpus(void)
{
- return cpu;
+ return hweight32(cpu_online_map);
}
-extern __inline__ int cpu_number_map(int cpu)
+extern inline int any_online_cpu(unsigned int mask)
{
- return cpu;
+ if (mask & cpu_online_map)
+ return __ffs(mask & cpu_online_map);
+
+ return -1;
}
extern __inline__ __u16 hard_smp_processor_id(void)
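
[annotation] Since cpu_online_map is a plain bitmask, the new helpers reduce to bit tricks: num_online_cpus() is a population count and any_online_cpu() picks the lowest set bit of mask & cpu_online_map, or -1 if none. A portable sketch with GCC builtins standing in for hweight32 and __ffs:

#include <assert.h>

static unsigned long cpu_online_map = 0x0000000dUL;	/* CPUs 0, 2, 3 online */

static unsigned int num_online_cpus(void)
{
	return __builtin_popcount(cpu_online_map);	/* hweight32 */
}

static int any_online_cpu(unsigned int mask)
{
	if (mask & cpu_online_map)
		return __builtin_ctz(mask & cpu_online_map);	/* __ffs */
	return -1;
}

int main(void)
{
	assert(num_online_cpus() == 3);
	assert(any_online_cpu(0x6) == 2);	/* CPU 1 offline, CPU 2 online */
	assert(any_online_cpu(0x2) == -1);	/* only offline CPUs requested */
	return 0;
}
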
diff --git a/include/asm-s390/softirq.h b/include/asm-s390/softirq.h
index b82aac30db21..91f9853561dd 100644
--- a/include/asm-s390/softirq.h
+++ b/include/asm-s390/softirq.h
@@ -9,34 +9,27 @@
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H
-#ifndef __LINUX_SMP_H
#include <linux/smp.h>
-#endif
+#include <linux/preempt.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <asm/lowcore.h>
-#define __cpu_bh_enable(cpu) \
- do { barrier(); local_bh_count(cpu)--; } while (0)
-#define cpu_bh_disable(cpu) \
- do { local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+#define local_bh_disable() \
+ do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+ do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
extern void do_call_softirq(void);
-#define local_bh_enable() \
-do { \
- unsigned int *ptr = &local_bh_count(smp_processor_id()); \
- barrier(); \
- if (!--*ptr) \
- if (softirq_pending(smp_processor_id())) \
- /* Use the async. stack for softirq */ \
- do_call_softirq(); \
+#define local_bh_enable() \
+do { \
+ __local_bh_enable(); \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ /* Use the async. stack for softirq */ \
+ do_call_softirq(); \
+ preempt_check_resched(); \
} while (0)
#endif /* __ASM_SOFTIRQ_H */
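
[annotation] local_bh_disable()/local_bh_enable() now just move the softirq byte of preempt_count instead of a per-CPU bh counter, and the enable path runs pending softirqs only once the count shows we have left interrupt context entirely. A minimal model of that enable-side check (stand-in globals; the real do_call_softirq switches to the async stack):

#include <stdio.h>

#define SOFTIRQ_OFFSET	0x100UL
#define IRQ_MASKS	0x1ff00UL	/* hardirq | softirq bytes */

static unsigned long preempt_count;
static int softirq_pending = 1;

static void do_call_softirq(void)
{
	softirq_pending = 0;
	puts("softirqs handled");
}

static void local_bh_disable(void) { preempt_count += SOFTIRQ_OFFSET; }

static void local_bh_enable(void)
{
	preempt_count -= SOFTIRQ_OFFSET;
	/* only the outermost enable, outside hardirq context, runs them */
	if (!(preempt_count & IRQ_MASKS) && softirq_pending)
		do_call_softirq();
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();	/* nested: the inner enable must not run them */
	local_bh_enable();
	local_bh_enable();	/* outermost: pending softirqs run here */
	return 0;
}
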
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 46e77f6fb9f1..176feb128687 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -76,6 +76,8 @@ typedef struct {
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_is_locked(x) ((x)->lock != 0)
+
#define _raw_read_lock(rw) \
asm volatile(" l 2,0(%1)\n" \
" j 1f\n" \
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 15e1af97a358..e0a825fdc763 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -12,96 +12,207 @@
#define __ASM_SYSTEM_H
#include <linux/config.h>
+#include <linux/kernel.h>
#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+
#ifdef __KERNEL__
-#include <asm/lowcore.h>
-#endif
-#include <linux/kernel.h>
+
+struct task_struct;
+
+extern struct task_struct *resume(void *, void *);
+
+static inline void save_fp_regs(s390_fp_regs *fpregs)
+{
+ asm volatile (
+ " std 0,8(%0)\n"
+ " std 2,24(%0)\n"
+ " std 4,40(%0)\n"
+ " std 6,56(%0)"
+ : : "a" (fpregs) : "memory" );
+ if (!MACHINE_HAS_IEEE)
+ return;
+ asm volatile(
+ " stfpc 0(%0)\n"
+ " std 1,16(%0)\n"
+ " std 3,32(%0)\n"
+ " std 5,48(%0)\n"
+ " std 7,64(%0)\n"
+ " std 8,72(%0)\n"
+ " std 9,80(%0)\n"
+ " std 10,88(%0)\n"
+ " std 11,96(%0)\n"
+ " std 12,104(%0)\n"
+ " std 13,112(%0)\n"
+ " std 14,120(%0)\n"
+ " std 15,128(%0)\n"
+ : : "a" (fpregs) : "memory" );
+}
+
+static inline void restore_fp_regs(s390_fp_regs *fpregs)
+{
+ asm volatile (
+ " ld 0,8(%0)\n"
+ " ld 2,24(%0)\n"
+ " ld 4,40(%0)\n"
+ " ld 6,56(%0)"
+ : : "a" (fpregs));
+ if (!MACHINE_HAS_IEEE)
+ return;
+ asm volatile(
+ " lfpc 0(%0)\n"
+ " ld 1,16(%0)\n"
+ " ld 3,32(%0)\n"
+ " ld 5,48(%0)\n"
+ " ld 7,64(%0)\n"
+ " ld 8,72(%0)\n"
+ " ld 9,80(%0)\n"
+ " ld 10,88(%0)\n"
+ " ld 11,96(%0)\n"
+ " ld 12,104(%0)\n"
+ " ld 13,112(%0)\n"
+ " ld 14,120(%0)\n"
+ " ld 15,128(%0)\n"
+ : : "a" (fpregs));
+}
#define switch_to(prev,next,last) do { \
if (prev == next) \
break; \
- save_fp_regs1(&prev->thread.fp_regs); \
- restore_fp_regs1(&next->thread.fp_regs); \
+ save_fp_regs(&prev->thread.fp_regs); \
+ restore_fp_regs(&next->thread.fp_regs); \
resume(prev,next); \
} while (0)
-struct task_struct;
-
#define nop() __asm__ __volatile__ ("nop")
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-extern void __misaligned_u16(void);
-extern void __misaligned_u32(void);
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
+ unsigned long addr, old;
+ int shift;
+
switch (size) {
- case 1:
- asm volatile (
- " lhi 1,3\n"
- " nr 1,%0\n" /* isolate last 2 bits */
- " xr %0,1\n" /* align ptr */
- " bras 2,0f\n"
- " icm 1,8,3(%1)\n" /* for ptr&3 == 0 */
- " stcm 0,8,3(%1)\n"
- " icm 1,4,3(%1)\n" /* for ptr&3 == 1 */
- " stcm 0,4,3(%1)\n"
- " icm 1,2,3(%1)\n" /* for ptr&3 == 2 */
- " stcm 0,2,3(%1)\n"
- " icm 1,1,3(%1)\n" /* for ptr&3 == 3 */
- " stcm 0,1,3(%1)\n"
- "0: sll 1,3\n"
- " la 2,0(1,2)\n" /* r2 points to an icm */
- " l 0,0(%0)\n" /* get fullword */
- "1: lr 1,0\n" /* cs loop */
- " ex 0,0(2)\n" /* insert x */
- " cs 0,1,0(%0)\n"
- " jl 1b\n"
- " ex 0,4(2)" /* store *ptr to x */
- : "+a&" (ptr) : "a" (&x)
- : "memory", "cc", "0", "1", "2");
- break;
- case 2:
- if(((__u32)ptr)&1)
- __misaligned_u16();
- asm volatile (
- " lhi 1,2\n"
- " nr 1,%0\n" /* isolate bit 2^1 */
- " xr %0,1\n" /* align ptr */
- " bras 2,0f\n"
- " icm 1,12,2(%1)\n" /* for ptr&2 == 0 */
- " stcm 0,12,2(%1)\n"
- " icm 1,3,2(%1)\n" /* for ptr&2 == 1 */
- " stcm 0,3,2(%1)\n"
- "0: sll 1,2\n"
- " la 2,0(1,2)\n" /* r2 points to an icm */
- " l 0,0(%0)\n" /* get fullword */
- "1: lr 1,0\n" /* cs loop */
- " ex 0,0(2)\n" /* insert x */
- " cs 0,1,0(%0)\n"
- " jl 1b\n"
- " ex 0,4(2)" /* store *ptr to x */
- : "+a&" (ptr) : "a" (&x)
- : "memory", "cc", "0", "1", "2");
- break;
- case 4:
- if(((__u32)ptr)&3)
- __misaligned_u32();
- asm volatile (
- " l 0,0(%1)\n"
- "0: cs 0,%0,0(%1)\n"
- " jl 0b\n"
- " lr %0,0\n"
- : "+d&" (x) : "a" (ptr)
- : "memory", "cc", "0" );
- break;
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,0(%3)\n"
+ "0: lr 0,%0\n"
+ " nr 0,%2\n"
+ " or 0,%1\n"
+ " cs %0,0,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old)
+ : "d" (x << shift), "d" (~(255 << shift)), "a" (addr)
+ : "memory", "cc", "0" );
+ x = old >> shift;
+ break;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,0(%3)\n"
+ "0: lr 0,%0\n"
+ " nr 0,%2\n"
+ " or 0,%1\n"
+ " cs %0,0,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old)
+ : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr)
+ : "memory", "cc", "0" );
+ x = old >> shift;
+ break;
+ case 4:
+ asm volatile (
+ " l %0,0(%2)\n"
+ "0: cs %0,%1,0(%2)\n"
+ " jl 0b\n"
+ : "=&d" (old) : "d" (x), "a" (ptr)
+ : "memory", "cc", "0" );
+ x = old;
+ break;
}
return x;
}
/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ unsigned long addr, prev, tmp;
+ int shift;
+
+ switch (size) {
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,0(%4)\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%2\n"
+ " or %1,%3\n"
+ " cs %0,%1,0(%4)\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp)
+ : "d" (old << shift), "d" (new << shift), "a" (ptr),
+ "d" (~(255 << shift))
+ : "memory", "cc" );
+ return prev >> shift;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,0(%4)\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%2\n"
+ " or %1,%3\n"
+ " cs %0,%1,0(%4)\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp)
+ : "d" (old << shift), "d" (new << shift), "a" (ptr),
+ "d" (~(65535 << shift))
+ : "memory", "cc" );
+ return prev >> shift;
+ case 4:
+ asm volatile (
+ " cs %0,%2,0(%3)\n"
+ : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
+ : "memory", "cc" );
+ return prev;
+ }
+ return old;
+}
+
+/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
@@ -130,22 +241,29 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define local_irq_enable() ({ \
__u8 __dummy; \
__asm__ __volatile__ ( \
- "stosm 0(%0),0x03" : : "a" (&__dummy) : "memory"); \
+ "stosm 0(%1),0x03" : "=m" (__dummy) : "a" (&__dummy) ); \
})
#define local_irq_disable() ({ \
__u32 __flags; \
__asm__ __volatile__ ( \
- "stnsm 0(%0),0xFC" : : "a" (&__flags) : "memory"); \
+ "stnsm 0(%1),0xFC" : "=m" (__flags) : "a" (&__flags) ); \
__flags; \
})
#define local_save_flags(x) \
- __asm__ __volatile__("stosm 0(%0),0" : : "a" (&x) : "memory")
+ __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x) )
#define local_irq_restore(x) \
__asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ !((flags >> 24) & 3); \
+})
+
#define __load_psw(psw) \
__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
@@ -210,16 +328,6 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#ifdef CONFIG_SMP
-extern void __global_cli(void);
-extern void __global_sti(void);
-
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
@@ -227,32 +335,16 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#else
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-#define save_flags(x) local_save_flags(x)
-#define restore_flags(x) local_irq_restore(x)
-
#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif
-
-#ifdef __KERNEL__
-extern struct task_struct *resume(void *, void *);
-
-extern int save_fp_regs1(s390_fp_regs *fpregs);
-extern void save_fp_regs(s390_fp_regs *fpregs);
-extern int restore_fp_regs1(s390_fp_regs *fpregs);
-extern void restore_fp_regs(s390_fp_regs *fpregs);
+#endif /* CONFIG_SMP */
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
-#endif
+#endif /* __KERNEL__ */
#endif
-
-
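
[annotation] The rewritten __xchg()/__cmpxchg() emulate 1- and 2-byte operations with the word-sized CS instruction: shift = (3 ^ (addr & 3)) << 3 turns a byte's big-endian position within its word into a shift count, addr ^= addr & 3 rounds the address down to the containing word, and the loop splices the sub-word value in and out until CS succeeds. A portable model of the 1-byte cmpxchg case built on a 32-bit compare-exchange (C11 atomics; the s390 big-endian shift formula is kept, though here it only selects a bit field within the word value):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Emulate a 1-byte cmpxchg on top of an aligned 32-bit CAS, the way
 * __cmpxchg() above does with CS. */
static uint8_t cmpxchg_u8(_Atomic uint32_t *base, uintptr_t addr,
			  uint8_t old, uint8_t new)
{
	int shift = (3 ^ (addr & 3)) << 3;	/* bit offset of the byte */
	uint32_t mask = 0xffu << shift;
	uint32_t word = atomic_load(base);

	for (;;) {
		if (((word & mask) >> shift) != old)
			return (word & mask) >> shift;	/* mismatch: actual value */
		uint32_t desired = (word & ~mask) | ((uint32_t)new << shift);
		if (atomic_compare_exchange_weak(base, &word, desired))
			return old;			/* success */
		/* word now holds the fresh value; loop like "jnz 0b" above */
	}
}

int main(void)
{
	_Atomic uint32_t word = 0x11223344u;	/* big-endian bytes 0..3 */
	/* byte at offset 2 holds 0x33 in this numbering: swap it for 0x99 */
	uint8_t prev = cmpxchg_u8(&word, 2, 0x33, 0x99);
	assert(prev == 0x33 && atomic_load(&word) == 0x11229944u);
	return 0;
}
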
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 329f401b52c0..26a7c4d21be8 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -25,11 +25,9 @@ struct thread_info {
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned int cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ unsigned int preempt_count; /* 0 => preemptable */
};
-#define PREEMPT_ACTIVE 0x4000000
-
/*
* macros/functions for gaining access to the thread information structure
*/
@@ -84,4 +82,6 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __KERNEL__ */
+#define PREEMPT_ACTIVE 0x4000000
+
#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 372c70f353f3..f079ea53a2a2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -91,8 +91,7 @@ static inline void global_flush_tlb(void)
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
- if ((smp_num_cpus > 1) &&
- ((atomic_read(&mm->mm_count) != 1) ||
+ if (((atomic_read(&mm->mm_count) != 1) ||
(mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
global_flush_tlb();
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index ff9cf61ee3e6..e295910bd72d 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -231,6 +231,16 @@
#define __NR_futex 238
#define __NR_sched_setaffinity 239
#define __NR_sched_getaffinity 240
+#define __NR_security 241 /* syscall for security modules */
+/*
+ * Number 242 is reserved for tux
+ */
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
diff --git a/include/asm-s390x/bitops.h b/include/asm-s390x/bitops.h
index eaff499377dd..8e95aa43780d 100644
--- a/include/asm-s390x/bitops.h
+++ b/include/asm-s390x/bitops.h
@@ -63,8 +63,8 @@ static inline void set_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make OR mask */
@@ -88,8 +88,8 @@ static inline void clear_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 63)); /* make AND mask */
@@ -113,8 +113,8 @@ static inline void change_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make XOR mask */
@@ -139,8 +139,8 @@ test_and_set_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make OR/test mask */
@@ -166,8 +166,8 @@ test_and_clear_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 63)); /* make AND mask */
@@ -193,8 +193,8 @@ test_and_change_bit_cs(unsigned long nr, volatile void *ptr)
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
- addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
+ addr ^= addr & 7; /* align address to 8 */
#endif
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make XOR mask */
@@ -811,7 +811,14 @@ extern __inline__ int fls(int x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-
+#define hweight64(x) \
+({ \
+ unsigned long __x = (x); \
+ unsigned int __w; \
+ __w = generic_hweight32((unsigned int) __x); \
+ __w += generic_hweight32((unsigned int) (__x>>32)); \
+ __w; \
+})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
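
[annotation] The new hweight64 just sums two 32-bit population counts. For reference, generic_hweight32 is the classic parallel bit-count from the generic bitops code, so the pair looks like this in standalone form:

#include <assert.h>
#include <stdint.h>

/* Classic tree reduction: sum bits in pairs, then nibbles, bytes, ... */
static unsigned int generic_hweight32(uint32_t w)
{
	uint32_t res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res & 0x0f0f0f0f) + ((res >> 4) & 0x0f0f0f0f);
	res = (res & 0x00ff00ff) + ((res >> 8) & 0x00ff00ff);
	return (res & 0x0000ffff) + ((res >> 16) & 0x0000ffff);
}

static unsigned int hweight64(uint64_t x)
{
	/* exactly the macro above: count each 32-bit half separately */
	return generic_hweight32((uint32_t)x) +
	       generic_hweight32((uint32_t)(x >> 32));
}

int main(void)
{
	assert(hweight64(0xf0f0f0f0f0f0f0f0ULL) == 32);
	assert(hweight64(1) == 1 && hweight64(0) == 0);
	return 0;
}
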
diff --git a/include/asm-s390x/checksum.h b/include/asm-s390x/checksum.h
index 8c436737e6f0..e44eb70fb63f 100644
--- a/include/asm-s390x/checksum.h
+++ b/include/asm-s390x/checksum.h
@@ -27,13 +27,29 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int
-csum_partial(const unsigned char * buff, int len, unsigned int sum);
+static inline unsigned int
+csum_partial(const unsigned char * buff, int len, unsigned int sum)
+{
+ /*
+ * Experiments with ethernet and slip connections show that buff
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
+ __asm__ __volatile__ (
+ " lgr 2,%1\n" /* address in gpr 2 */
+ " lgfr 3,%2\n" /* length in gpr 3 */
+ "0: cksm %0,2\n" /* do checksum on longs */
+ " jo 0b\n"
+ : "+&d" (sum)
+ : "d" (buff), "d" (len)
+ : "cc", "2", "3" );
+ return sum;
+
+}
/*
* csum_partial as an inline function
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
__asm__ __volatile__ (
@@ -55,7 +71,7 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
* better 64-bit) boundary
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
{
memcpy(dst,src,len);
@@ -71,7 +87,7 @@ csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy_from_user (const char *src, char *dst,
int len, unsigned int sum,
int *err_ptr)
@@ -87,7 +103,7 @@ csum_partial_copy_from_user (const char *src, char *dst,
return csum_partial(dst, len, sum);
}
-extern inline unsigned int
+static inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
memcpy(dst,src,len);
@@ -97,7 +113,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum
/*
* Fold a partial checksum without adding pseudo headers
*/
-extern inline unsigned short
+static inline unsigned short
csum_fold(unsigned int sum)
{
__asm__ __volatile__ (
@@ -116,7 +132,7 @@ csum_fold(unsigned int sum)
* which always checksum on 4 octet boundaries.
*
*/
-extern inline unsigned short
+static inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
unsigned long sum;
@@ -137,7 +153,7 @@ ip_fast_csum(unsigned char *iph, unsigned int ihl)
* computes the checksum of the TCP/UDP pseudo-header
* returns a 32-bit checksum
*/
-extern inline unsigned int
+static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
@@ -170,7 +186,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
* returns a 16-bit checksum, already complemented
*/
-extern inline unsigned short int
+static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
@@ -183,7 +199,7 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
* in icmp.c
*/
-extern inline unsigned short
+static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial_inline(buff, len, 0));
diff --git a/include/asm-s390x/dasd.h b/include/asm-s390x/dasd.h
index 1ea18f182184..d536b229bfc9 100644
--- a/include/asm-s390x/dasd.h
+++ b/include/asm-s390x/dasd.h
@@ -13,6 +13,8 @@
* 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2)
* 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD (and BIODASDENAPAV) IOCTL
* 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL
+ * ##/##/## DASD_API_VERSION 5 - added boxed dasd support TOBEDONE
+ * 21/06/02 DASD_API_VERSION 6 - fixed HDIO_GETGEO: geo.start is in sectors!
*
*/
@@ -22,7 +24,7 @@
#define DASD_IOCTL_LETTER 'D'
-#define DASD_API_VERSION 4
+#define DASD_API_VERSION 6
/*
* struct dasd_information2_t
diff --git a/include/asm-s390x/debug.h b/include/asm-s390x/debug.h
index e46698ba43b1..56a4043a1155 100644
--- a/include/asm-s390x/debug.h
+++ b/include/asm-s390x/debug.h
@@ -160,7 +160,8 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
}
extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...);
+debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+ __attribute__ ((format(printf, 3, 4)));
extern inline debug_entry_t*
@@ -195,7 +196,8 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...);
+debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+ __attribute__ ((format(printf, 3, 4)));
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
diff --git a/include/asm-s390x/hardirq.h b/include/asm-s390x/hardirq.h
index 607f34ee659a..05ead85061d9 100644
--- a/include/asm-s390x/hardirq.h
+++ b/include/asm-s390x/hardirq.h
@@ -21,8 +21,6 @@
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
} ____cacheline_aligned irq_cpustat_t;
@@ -30,64 +28,82 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00010000
*/
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+// FIXME: we have 2^16 i/o and 2^16 external interrupts...
+#define HARDIRQ_BITS 1
-#ifndef CONFIG_SMP
-
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
-
-#define hardirq_enter(cpu) (local_irq_count(cpu)++)
-#define hardirq_exit(cpu) (local_irq_count(cpu)--)
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define synchronize_irq() do { } while (0)
+#define __MASK(x) ((1UL << (x))-1)
-#else
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#include <asm/atomic.h>
-#include <asm/smp.h>
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-extern atomic_t global_irq_holder;
-extern atomic_t global_irq_lock;
-extern atomic_t global_irq_count;
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-static inline void release_irqlock(int cpu)
-{
- /* if we didn't own the irq lock, just ignore.. */
- if (atomic_read(&global_irq_holder) == cpu) {
- atomic_set(&global_irq_holder,NO_PROC_ID);
- atomic_set(&global_irq_lock,0);
- }
-}
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
-static inline void hardirq_enter(int cpu)
-{
- ++local_irq_count(cpu);
- atomic_inc(&global_irq_count);
-}
-static inline void hardirq_exit(int cpu)
-{
- atomic_dec(&global_irq_count);
- --local_irq_count(cpu);
-}
+#define hardirq_trylock() (!in_interrupt())
+#define hardirq_endlock() do { } while (0)
-static inline int hardirq_trylock(int cpu)
-{
- return !atomic_read(&global_irq_count) &&
- !atomic_read(&global_irq_lock);
-}
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
-#define hardirq_endlock(cpu) do { } while (0)
+extern void do_call_softirq(void);
-extern void synchronize_irq(void);
+#if CONFIG_PREEMPT
+# define in_atomic() (in_interrupt() || preempt_count() == PREEMPT_ACTIVE)
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define in_atomic() (preempt_count() != 0)
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ /* Use the async. stack for softirq */ \
+ do_call_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq) barrier()
+#else
+ extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
+extern void show_stack(unsigned long * esp);
+
#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390x/irq.h b/include/asm-s390x/irq.h
index 8ecfe3324a44..bcc71eccbc63 100644
--- a/include/asm-s390x/irq.h
+++ b/include/asm-s390x/irq.h
@@ -637,6 +637,12 @@ int s390_request_irq_special( int irq,
const char *devname,
void *dev_id);
+extern int s390_request_console_irq (int irq,
+ void (*handler) (int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+
extern int set_cons_dev(int irq);
extern int wait_cons_dev(int irq);
extern schib_t *s390_get_schib( int irq );
@@ -860,28 +866,8 @@ typedef struct {
__u32 vrdccrft : 8; /* real device feature (output) */
} __attribute__ ((packed,aligned(4))) diag210_t;
-void VM_virtual_device_info( __u16 devno, /* device number */
- senseid_t *ps ); /* ptr to senseID data */
+extern int diag210( diag210_t * addr);
-extern __inline__ int diag210( diag210_t * addr)
-{
- int ccode;
-
- __asm__ __volatile__(
-#ifdef CONFIG_ARCH_S390X
- " sam31\n"
- " diag %1,0,0x210\n"
- " sam64\n"
-#else
- " diag %1,0,0x210\n"
-#endif
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "a" (addr)
- : "cc" );
- return ccode;
-}
extern __inline__ int chsc( chsc_area_t * chsc_area)
{
int cc;
@@ -897,67 +883,6 @@ extern __inline__ int chsc( chsc_area_t * chsc_area)
return cc;
}
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-#ifdef CONFIG_SMP
-
-#include <asm/atomic.h>
-
-static inline void irq_enter(int cpu, unsigned int irq)
-{
- hardirq_enter(cpu);
- while (atomic_read(&global_irq_lock) != 0) {
- eieio();
- }
-}
-
-static inline void irq_exit(int cpu, unsigned int irq)
-{
- hardirq_exit(cpu);
- release_irqlock(cpu);
-}
-
-
-#else
-
-#define irq_enter(cpu, irq) (++local_irq_count(cpu))
-#define irq_exit(cpu, irq) (--local_irq_count(cpu))
-
-#endif
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-/*
- * x86 profiling function, SMP safe. We might want to do this in
- * assembly totally?
- * is this ever used anyway?
- */
-extern char _stext;
-static inline void s390_do_profile (unsigned long addr)
-{
- if (prof_buffer && current->pid) {
-#ifndef CONFIG_ARCH_S390X
- addr &= 0x7fffffff;
-#endif
- addr -= (unsigned long) &_stext;
- addr >>= prof_shift;
- /*
- * Don't ignore out-of-bounds EIP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (addr > prof_len-1)
- addr = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[addr]);
- }
-}
-
#include <asm/s390io.h>
#define get_irq_lock(irq) &ioinfo[irq]->irq_lock
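diag210() is no longer an inline; callers now go through one out-of-line implementation. A hypothetical caller, for illustration only: the vrdcdvno/vrdclen field names and the errno choice are assumptions, since only diag210() itself and the vrdccrft field appear in this diff:

static int query_vm_device(__u16 devno)
{
	diag210_t d;

	memset(&d, 0, sizeof(d));
	d.vrdcdvno = devno;		/* device number (assumed field) */
	d.vrdclen = sizeof(d);		/* buffer length (assumed field) */
	if (diag210(&d) != 0)		/* nonzero cc: VM doesn't know it */
		return -ENODEV;
	return d.vrdccrft;		/* real device feature byte */
}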
diff --git a/include/asm-s390x/kmap_types.h b/include/asm-s390x/kmap_types.h
new file mode 100644
index 000000000000..27f3d6c49ad5
--- /dev/null
+++ b/include/asm-s390x/kmap_types.h
@@ -0,0 +1,21 @@
+#ifdef __KERNEL__
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_TYPE_NR
+};
+
+#endif
+#endif /* __KERNEL__ */
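The new kmap_types.h only has to enumerate the atomic kmap slots; s390x has no highmem, so the slots are bookkeeping rather than real fixmap entries. Generic usage, as a sketch (kmap_atomic()/kunmap_atomic() are the generic 2.5 interfaces, not defined in this file):

	/* Copy into a page through a per-type atomic mapping (sketch). */
	char *dst = kmap_atomic(page, KM_USER0);
	memcpy(dst, src, len);
	kunmap_atomic(dst, KM_USER0);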
diff --git a/include/asm-s390x/lowcore.h b/include/asm-s390x/lowcore.h
index edb29f4df6bf..41e0c5fc07cf 100644
--- a/include/asm-s390x/lowcore.h
+++ b/include/asm-s390x/lowcore.h
@@ -38,6 +38,8 @@
#define __LC_IO_INT_WORD 0x0C0
#define __LC_MCCK_CODE 0x0E8
+#define __LC_DIAG44_OPCODE 0x214
+
#define __LC_SAVE_AREA 0xC00
#define __LC_KERNEL_STACK 0xD40
#define __LC_ASYNC_STACK 0xD48
@@ -54,32 +56,6 @@
#define __LC_PFAULT_INTPARM 0x11B8
-/* interrupt handler start with all io, external and mcck interrupt disabled */
-
-#define _RESTART_PSW_MASK 0x0000000180000000
-#define _EXT_PSW_MASK 0x0400000180000000
-#define _PGM_PSW_MASK 0x0400000180000000
-#define _SVC_PSW_MASK 0x0400000180000000
-#define _MCCK_PSW_MASK 0x0400000180000000
-#define _IO_PSW_MASK 0x0400000180000000
-#define _USER_PSW_MASK 0x0705C00180000000
-#define _WAIT_PSW_MASK 0x0706000180000000
-#define _DW_PSW_MASK 0x0002000180000000
-
-#define _PRIMARY_MASK 0x0000 /* MASK for SACF */
-#define _SECONDARY_MASK 0x0100 /* MASK for SACF */
-#define _ACCESS_MASK 0x0200 /* MASK for SACF */
-#define _HOME_MASK 0x0300 /* MASK for SACF */
-
-#define _PSW_PRIM_SPACE_MODE 0x0000000000000000
-#define _PSW_SEC_SPACE_MODE 0x0000800000000000
-#define _PSW_ACC_REG_MODE 0x0000400000000000
-#define _PSW_HOME_SPACE_MODE 0x0000C00000000000
-
-#define _PSW_WAIT_MASK_BIT 0x0002000000000000
-#define _PSW_IO_MASK_BIT 0x0200000000000000
-#define _PSW_IO_WAIT 0x0202000000000000
-
#ifndef __ASSEMBLY__
#include <linux/config.h>
@@ -146,7 +122,8 @@ struct _lowcore
psw_t io_new_psw; /* 0x1f0 */
psw_t return_psw; /* 0x200 */
__u32 sync_io_word; /* 0x210 */
- __u8 pad8[0xc00-0x214]; /* 0x214 */
+ __u32 diag44_opcode; /* 0x214 */
+ __u8 pad8[0xc00-0x218]; /* 0x218 */
/* System info area */
__u64 save_area[16]; /* 0xc00 */
__u8 pad9[0xd40-0xc80]; /* 0xc80 */
@@ -191,25 +168,17 @@ struct _lowcore
__u8 pad17[0x2000-0x1400]; /* 0x1400 */
} __attribute__((packed)); /* End structure*/
+#define S390_lowcore (*((struct _lowcore *) 0))
+extern struct _lowcore *lowcore_ptr[];
+
extern __inline__ void set_prefix(__u32 address)
{
__asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" );
}
-#define S390_lowcore (*((struct _lowcore *) 0))
-extern struct _lowcore *lowcore_ptr[];
+#define __PANIC_MAGIC 0xDEADC0DE
-#ifndef CONFIG_SMP
-#define get_cpu_lowcore(cpu) (&S390_lowcore)
-#define safe_get_cpu_lowcore(cpu) (&S390_lowcore)
-#else
-#define get_cpu_lowcore(cpu) (lowcore_ptr[(cpu)])
-#define safe_get_cpu_lowcore(cpu) \
- ((cpu) == smp_processor_id() ? &S390_lowcore : lowcore_ptr[(cpu)])
#endif
-#endif /* __ASSEMBLY__ */
-
-#define __PANIC_MAGIC 0xDEADC0DE
#endif
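Because S390_lowcore dereferences absolute address 0, every asm offset such as __LC_DIAG44_OPCODE has to match the C struct layout exactly. A sanity-check sketch using only offsets visible in this diff; the negative-array trick is illustrative, this header contains no such check:

#include <linux/stddef.h>

#define LC_ASSERT(member, offset) \
	((void) sizeof(char[1 - 2*(offsetof(struct _lowcore, member) != (offset))]))

static inline void lowcore_layout_check(void)
{
	LC_ASSERT(diag44_opcode, 0x214);	/* __LC_DIAG44_OPCODE */
	LC_ASSERT(save_area, 0xc00);		/* __LC_SAVE_AREA */
}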
diff --git a/include/asm-s390x/param.h b/include/asm-s390x/param.h
index f2e0cc0a4dcc..753b8bdeecba 100644
--- a/include/asm-s390x/param.h
+++ b/include/asm-s390x/param.h
@@ -9,11 +9,14 @@
#ifndef _ASMS390_PARAM_H
#define _ASMS390_PARAM_H
-#ifndef HZ
-#define HZ 100
#ifdef __KERNEL__
-#define hz_to_std(a) (a)
+# define HZ 100 /* Internal kernel timer frequency */
+# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
+
+#ifndef HZ
+#define HZ 100
#endif
#define EXEC_PAGESIZE 4096
@@ -28,8 +31,4 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
-#ifdef __KERNEL__
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
-#endif
-
#endif
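Splitting HZ from USER_HZ decouples the kernel's internal tick rate from the value user space sees through times() and friends; with both at 100 the conversion is the identity, but the usual scaling looks like this (a sketch, assuming HZ is a whole multiple of USER_HZ):

static inline unsigned long jiffies_to_clock_t(unsigned long j)
{
	return j / (HZ / USER_HZ);	/* 100/100 == 1 today */
}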
diff --git a/include/asm-s390x/pgalloc.h b/include/asm-s390x/pgalloc.h
index 282ec93f29d4..838eb9ff927e 100644
--- a/include/asm-s390x/pgalloc.h
+++ b/include/asm-s390x/pgalloc.h
@@ -16,6 +16,8 @@
#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
#define check_pgt_cache() do { } while (0)
diff --git a/include/asm-s390x/pgtable.h b/include/asm-s390x/pgtable.h
index 6c590334ab3a..f84930684b68 100644
--- a/include/asm-s390x/pgtable.h
+++ b/include/asm-s390x/pgtable.h
@@ -168,6 +168,8 @@ extern char empty_zero_page[PAGE_SIZE];
#define _REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
#define _KERN_REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN)
+#define USER_STD_MASK 0x0000000000000080UL
+
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
diff --git a/include/asm-s390x/processor.h b/include/asm-s390x/processor.h
index f578c9eba468..334cd6dcf6c4 100644
--- a/include/asm-s390x/processor.h
+++ b/include/asm-s390x/processor.h
@@ -111,13 +111,13 @@ typedef struct thread_struct thread_struct;
/* need to define ... */
#define start_thread(regs, new_psw, new_stackp) do { \
- regs->psw.mask = _USER_PSW_MASK; \
+ regs->psw.mask = PSW_USER_BITS; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- regs->psw.mask = _USER_PSW_MASK & ~(1L << 32); \
+ regs->psw.mask = PSW_USER32_BITS; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
} while (0)
@@ -154,19 +154,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-/* Only let our hackers near the condition codes */
-#define PSW_MASK_DEBUGCHANGE 0x0000300000000000UL
-/* Don't let em near the addressing mode either */
-#define PSW_ADDR_DEBUGCHANGE 0xFFFFFFFFFFFFFFFFUL
-#define PSW_ADDR_MASK 0xFFFFFFFFFFFFFFFFUL
-/* Program event recording mask */
-#define PSW_PER_MASK 0x4000000000000000UL
-#define USER_STD_MASK 0x0000000000000080UL
-#define PSW_PROBLEM_STATE 0x0001000000000000UL
-
-/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
@@ -194,7 +181,8 @@ static inline void enabled_wait(void)
unsigned long reg;
psw_t wait_psw;
- wait_psw.mask = 0x0706000180000000;
+ wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
+ PSW_MASK_MCHECK | PSW_MASK_WAIT;
asm volatile (
" larl %0,0f\n"
" stg %0,8(%1)\n"
@@ -214,7 +202,7 @@ static inline void disabled_wait(addr_t code)
psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
& -sizeof(psw_t));
- dw_psw->mask = 0x0002000180000000;
+ dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT;
dw_psw->addr = code;
/*
* Store status and then load disabled wait psw,
diff --git a/include/asm-s390x/ptrace.h b/include/asm-s390x/ptrace.h
index 311d6624a6d0..adf7fbaf629b 100644
--- a/include/asm-s390x/ptrace.h
+++ b/include/asm-s390x/ptrace.h
@@ -104,10 +104,33 @@ typedef struct
__u64 addr;
} __attribute__ ((aligned(8))) psw_t;
-#ifdef __KERNEL__
-#define FIX_PSW(addr) ((unsigned long)(addr))
-#define ADDR_BITS_REMOVE(addr) ((addr))
-#endif
+#define PSW_MASK_PER 0x4000000000000000UL
+#define PSW_MASK_DAT 0x0400000000000000UL
+#define PSW_MASK_IO 0x0200000000000000UL
+#define PSW_MASK_EXT 0x0100000000000000UL
+#define PSW_MASK_KEY 0x00F0000000000000UL
+#define PSW_MASK_MCHECK 0x0004000000000000UL
+#define PSW_MASK_WAIT 0x0002000000000000UL
+#define PSW_MASK_PSTATE 0x0001000000000000UL
+#define PSW_MASK_ASC 0x0000C00000000000UL
+#define PSW_MASK_CC 0x0000300000000000UL
+#define PSW_MASK_PM 0x00000F0000000000UL
+
+#define PSW_BASE_BITS 0x0000000180000000UL
+#define PSW_BASE32_BITS 0x0000000080000000UL
+
+#define PSW_ASC_PRIMARY 0x0000000000000000UL
+#define PSW_ASC_ACCREG 0x0000400000000000UL
+#define PSW_ASC_SECONDARY 0x0000800000000000UL
+#define PSW_ASC_HOME 0x0000C00000000000UL
+
+#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY)
+#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE)
+#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE)
typedef union
{
@@ -309,7 +332,7 @@ struct user_regs_struct
};
#ifdef __KERNEL__
-#define user_mode(regs) (((regs)->psw.mask & PSW_PROBLEM_STATE) != 0)
+#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr)
extern void show_regs(struct pt_regs * regs);
#endif
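The symbolic PSW_* masks are not just cosmetic: ORed together they reproduce the literals they replace. Checking PSW_USER_BITS against the old _USER_PSW_MASK removed from lowcore.h:

/*
 *   PSW_BASE_BITS    0x0000000180000000
 * | PSW_MASK_DAT     0x0400000000000000
 * | PSW_ASC_HOME     0x0000C00000000000
 * | PSW_MASK_IO      0x0200000000000000
 * | PSW_MASK_EXT     0x0100000000000000
 * | PSW_MASK_MCHECK  0x0004000000000000
 * | PSW_MASK_PSTATE  0x0001000000000000
 *   ----------------------------------
 *   PSW_USER_BITS    0x0705C00180000000   == old _USER_PSW_MASK
 */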
diff --git a/include/asm-s390x/rwsem.h b/include/asm-s390x/rwsem.h
index 5ee597eadc88..ec635241c20e 100644
--- a/include/asm-s390x/rwsem.h
+++ b/include/asm-s390x/rwsem.h
@@ -48,9 +48,11 @@
struct rwsem_waiter;
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
/*
* the semaphore definition
@@ -105,6 +107,27 @@ static inline void __down_read(struct rw_semaphore *sem)
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ __asm__ __volatile__(
+ " lg %0,0(%2)\n"
+ "0: ltgr %1,%0\n"
+ " jm 1f\n"
+ " aghi %1,%3\n"
+ " csg %0,%1,0(%2)\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old), "=&d" (new)
+ : "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "cc", "memory" );
+ return old >= 0 ? 1 : 0;
+}
+
+/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
@@ -126,6 +149,26 @@ static inline void __down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ signed long old;
+
+ __asm__ __volatile__(
+ " lg %0,0(%1)\n"
+ "0: ltgr %0,%0\n"
+ " jnz 1f\n"
+ " csg %0,%2,0(%1)\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old)
+ : "a" (&sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "cc", "memory" );
+ return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
+}
+
+/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
@@ -169,6 +212,27 @@ static inline void __up_write(struct rw_semaphore *sem)
}
/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ signed long old, new, tmp;
+
+ tmp = -RWSEM_WAITING_BIAS;
+ __asm__ __volatile__(
+ " lg %0,0(%2)\n"
+ "0: lgr %1,%0\n"
+ " ag %1,%3\n"
+ " csg %0,%1,0(%2)\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new)
+ : "a" (&sem->count), "m" (tmp)
+ : "cc", "memory" );
+ if (new > 1) // FIXME: is this correct ?!?
+ rwsem_downgrade_wake(sem);
+}
+
+/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
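__down_read_trylock() is easier to read as the C loop it implements: snapshot the count, refuse if a writer holds or wants the lock, otherwise add the reader bias with an interlocked update. A plain-C rendering of the csg loop, as a sketch only; cmpxchg() is the primitive added to system.h further down:

static inline int down_read_trylock_sketch(struct rw_semaphore *sem)
{
	signed long old, new;

	do {
		old = sem->count;
		if (old < 0)		/* writer active or queued */
			return 0;
		new = old + RWSEM_ACTIVE_READ_BIAS;
	} while (cmpxchg(&sem->count, old, new) != old);
	return 1;
}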
diff --git a/include/asm-s390x/s390io.h b/include/asm-s390x/s390io.h
index 9a984f96dcfa..8424a470e887 100644
--- a/include/asm-s390x/s390io.h
+++ b/include/asm-s390x/s390io.h
@@ -9,21 +9,24 @@
#ifndef __s390io_h
#define __s390io_h
+#include <linux/device.h>
+
/*
* IRQ data structure used by I/O subroutines
*
* Note : If bit flags are added, the "unused" value must be
* decremented accordingly !
*/
-typedef struct _ioinfo {
+typedef struct subchannel {
unsigned int irq; /* aka. subchannel number */
spinlock_t irq_lock; /* irq lock */
- void *private_data; /* pointer to private data */
- struct _ioinfo *prev;
- struct _ioinfo *next;
+ __u8 st; /* subchannel type */
+
+ void *private_data; /* pointer to private data */
- __u8 st; /* subchannel type */
+ struct subchannel *prev;
+ struct subchannel *next;
union {
unsigned int info;
@@ -78,8 +81,26 @@ typedef struct _ioinfo {
unsigned long qflag; /* queued flags */
__u8 qlpm; /* queued logical path mask */
ssd_info_t ssd_info; /* subchannel description */
+ struct device dev; /* entry in device tree */
+} __attribute__ ((aligned(8))) ioinfo_t;
- } __attribute__ ((aligned(8))) ioinfo_t;
+
+/*
+ * There are four different subchannel types, but we are currently
+ * only interested in I/O subchannels. This means there is only
+ * one subchannel_driver, other subchannels belonging to css_bus_type
+ * are simply ignored.
+ */
+struct subchannel_driver {
+ enum {
+ SUBCHANNEL_TYPE_IO = 0,
+ SUBCHANNEL_TYPE_CHSC = 1,
+ SUBCHANNEL_TYPE_MESSAGE = 2,
+ SUBCHANNEL_TYPE_ADM = 3,
+ } st; /* subchannel type */
+ struct device_driver drv; /* entry in driver tree */
+};
+extern struct bus_type css_bus_type;
#define IOINFO_FLAGS_BUSY 0x80000000
#define IOINFO_FLAGS_OPER 0x40000000
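Embedding struct device in the subchannel is what ties it into the new driver model: the driver core hands back a struct device *, and the subchannel is recovered with the standard container_of() idiom (helper name is illustrative):

#include <linux/kernel.h>	/* container_of() */

static inline ioinfo_t *to_subchannel(struct device *dev)
{
	return container_of(dev, ioinfo_t, dev);
}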
diff --git a/include/asm-s390x/setup.h b/include/asm-s390x/setup.h
index 0a0419089bf5..9aa13a641520 100644
--- a/include/asm-s390x/setup.h
+++ b/include/asm-s390x/setup.h
@@ -25,11 +25,12 @@
*/
extern unsigned long machine_flags;
-#define MACHINE_IS_VM (machine_flags & 1)
-#define MACHINE_IS_P390 (machine_flags & 4)
-#define MACHINE_HAS_MVPG (machine_flags & 16)
+#define MACHINE_IS_VM (machine_flags & 1)
+#define MACHINE_IS_P390 (machine_flags & 4)
+#define MACHINE_HAS_MVPG (machine_flags & 16)
+#define MACHINE_HAS_DIAG44 (machine_flags & 32)
-#define MACHINE_HAS_HWC (!MACHINE_IS_P390)
+#define MACHINE_HAS_HWC (!MACHINE_IS_P390)
/*
* Console mode. Override with conmode=
diff --git a/include/asm-s390x/smp.h b/include/asm-s390x/smp.h
index 8caa0b8bdd87..28e1fd23b426 100644
--- a/include/asm-s390x/smp.h
+++ b/include/asm-s390x/smp.h
@@ -11,7 +11,7 @@
#include <linux/config.h>
#include <linux/threads.h>
-#include <linux/ptrace.h>
+#include <linux/bitops.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
@@ -29,6 +29,7 @@ typedef struct
} sigp_info;
extern volatile unsigned long cpu_online_map;
+extern volatile unsigned long cpu_possible_map;
#define NO_PROC_ID 0xFF /* No processor magic marker */
@@ -46,14 +47,20 @@ extern volatile unsigned long cpu_online_map;
#define smp_processor_id() (current_thread_info()->cpu)
-extern __inline__ int cpu_logical_map(int cpu)
+#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu)))
+
+extern inline unsigned int num_online_cpus(void)
{
- return cpu;
+ return hweight64(cpu_online_map);
}
-extern __inline__ int cpu_number_map(int cpu)
+extern inline int any_online_cpu(unsigned int mask)
{
- return cpu;
+ if (mask & cpu_online_map)
+ return __ffs(mask & cpu_online_map);
+
+ return -1;
}
extern __inline__ __u16 hard_smp_processor_id(void)
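cpu_online_map and cpu_possible_map are plain 64-bit masks here, so the new helpers reduce to bit tests and population counts. A usage sketch; the fallback policy is made up for illustration:

static inline int pick_ipi_target(unsigned int preferred)
{
	if (cpu_online(preferred))
		return preferred;
	return any_online_cpu(~0UL);	/* first online CPU, or -1 if none */
}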
diff --git a/include/asm-s390x/softirq.h b/include/asm-s390x/softirq.h
index b82aac30db21..91f9853561dd 100644
--- a/include/asm-s390x/softirq.h
+++ b/include/asm-s390x/softirq.h
@@ -9,34 +9,27 @@
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H
-#ifndef __LINUX_SMP_H
#include <linux/smp.h>
-#endif
+#include <linux/preempt.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <asm/lowcore.h>
-#define __cpu_bh_enable(cpu) \
- do { barrier(); local_bh_count(cpu)--; } while (0)
-#define cpu_bh_disable(cpu) \
- do { local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+#define local_bh_disable() \
+ do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+ do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
extern void do_call_softirq(void);
-#define local_bh_enable() \
-do { \
- unsigned int *ptr = &local_bh_count(smp_processor_id()); \
- barrier(); \
- if (!--*ptr) \
- if (softirq_pending(smp_processor_id())) \
- /* Use the async. stack for softirq */ \
- do_call_softirq(); \
+#define local_bh_enable() \
+do { \
+ __local_bh_enable(); \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ /* Use the async. stack for softirq */ \
+ do_call_softirq(); \
+ preempt_check_resched(); \
} while (0)
#endif /* __ASM_SOFTIRQ_H */
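local_bh_enable() now decrements the softirq count in preempt_count() and only runs pending softirqs when that leaves the CPU outside all interrupt context. Unrolled into a function, the macro above reads like this (a sketch, nothing more):

static inline void local_bh_enable_sketch(void)
{
	barrier();
	preempt_count() -= SOFTIRQ_OFFSET;	/* __local_bh_enable() */
	if (!in_interrupt() && softirq_pending(smp_processor_id()))
		do_call_softirq();	/* run softirqs on the async stack */
	preempt_check_resched();	/* a preemption point may be due */
}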
diff --git a/include/asm-s390x/spinlock.h b/include/asm-s390x/spinlock.h
index d18dba14f2eb..e9e226f5f23d 100644
--- a/include/asm-s390x/spinlock.h
+++ b/include/asm-s390x/spinlock.h
@@ -12,6 +12,20 @@
#define __ASM_SPINLOCK_H
/*
+ * Grmph, take care of %&#! user space programs that include
+ * asm/spinlock.h. The diagnose is only available in kernel
+ * context.
+ */
+#ifdef __KERNEL__
+#include <asm/lowcore.h>
+#define __DIAG44_INSN "ex"
+#define __DIAG44_OPERAND __LC_DIAG44_OPCODE
+#else
+#define __DIAG44_INSN "#"
+#define __DIAG44_OPERAND 0
+#endif
+
+/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
@@ -31,12 +45,13 @@ extern inline void _raw_spin_lock(spinlock_t *lp)
{
unsigned long reg1, reg2;
__asm__ __volatile(" bras %1,1f\n"
- "0: # diag 0,0,68\n"
+ "0: " __DIAG44_INSN " 0,%4\n"
"1: slr %0,%0\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
: "=&d" (reg1), "=&d" (reg2), "+m" (lp->lock)
- : "a" (&lp->lock) : "cc" );
+ : "a" (&lp->lock), "i" (__DIAG44_OPERAND)
+ : "cc" );
}
extern inline int _raw_spin_trylock(spinlock_t *lp)
@@ -76,46 +91,52 @@ typedef struct {
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_is_locked(x) ((x)->lock != 0)
+
#define _raw_read_lock(rw) \
asm volatile(" lg 2,0(%1)\n" \
" j 1f\n" \
- "0: # diag 0,0,68\n" \
+ "0: " __DIAG44_INSN " 0,%2\n" \
"1: nihh 2,0x7fff\n" /* clear high (=write) bit */ \
" la 3,1(2)\n" /* one more reader */ \
" csg 2,3,0(%1)\n" /* try to write new value */ \
" jl 0b" \
- : "+m" ((rw)->lock) : "a" (&(rw)->lock) \
+ : "+m" ((rw)->lock) \
+ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \
: "2", "3", "cc" )
#define _raw_read_unlock(rw) \
asm volatile(" lg 2,0(%1)\n" \
" j 1f\n" \
- "0: # diag 0,0,68\n" \
+ "0: " __DIAG44_INSN " 0,%2\n" \
"1: lgr 3,2\n" \
" bctgr 3,0\n" /* one less reader */ \
" csg 2,3,0(%1)\n" \
" jl 0b" \
- : "+m" ((rw)->lock) : "a" (&(rw)->lock) \
+ : "+m" ((rw)->lock) \
+ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \
: "2", "3", "cc" )
#define _raw_write_lock(rw) \
asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \
" j 1f\n" \
- "0: # diag 0,0,68\n" \
+ "0: " __DIAG44_INSN " 0,%2\n" \
"1: slgr 2,2\n" /* old lock value must be 0 */ \
" csg 2,3,0(%1)\n" \
" jl 0b" \
- : "+m" ((rw)->lock) : "a" (&(rw)->lock) \
+ : "+m" ((rw)->lock) \
+ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \
: "2", "3", "cc" )
#define _raw_write_unlock(rw) \
asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \
" j 1f\n" \
- "0: # diag 0,0,68\n" \
+ "0: " __DIAG44_INSN " 0,%2\n" \
"1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\
" csg 2,3,0(%1)\n" \
" jl 0b" \
- : "+m" ((rw)->lock) : "a" (&(rw)->lock) \
+ : "+m" ((rw)->lock) \
+ : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND) \
: "2", "3", "cc" )
#endif /* __ASM_SPINLOCK_H */
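All four lock loops follow the same pattern: compute the new lock word, attempt an interlocked compare-and-swap, and on contention execute the "yield to hypervisor" hint via the opcode parked at __LC_DIAG44_OPCODE (presumably patched to a no-op when MACHINE_HAS_DIAG44 is clear). In outline, as a sketch; the lock token and yield helper are illustrative, the real loop is the cs/csg asm above:

static inline void spin_lock_sketch(spinlock_t *lp, unsigned int token)
{
	/* token must be nonzero; the asm uses an address set up by bras */
	while (cmpxchg(&lp->lock, 0, token) != 0)
		hypervisor_yield();	/* hypothetical wrapper for diag 0x44 */
}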
diff --git a/include/asm-s390x/system.h b/include/asm-s390x/system.h
index 81810a4819b9..74470fc09edd 100644
--- a/include/asm-s390x/system.h
+++ b/include/asm-s390x/system.h
@@ -12,13 +12,64 @@
#define __ASM_SYSTEM_H
#include <linux/config.h>
+#include <linux/kernel.h>
#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+
#ifdef __KERNEL__
-#include <asm/lowcore.h>
-#endif
-#include <linux/kernel.h>
-#define switch_to(prev,next),last do { \
+struct task_struct;
+
+extern struct task_struct *resume(void *, void *);
+
+static inline void save_fp_regs(s390_fp_regs *fpregs)
+{
+ asm volatile (
+ " stfpc 0(%0)\n"
+ " std 0,8(%0)\n"
+ " std 1,16(%0)\n"
+ " std 2,24(%0)\n"
+ " std 3,32(%0)\n"
+ " std 4,40(%0)\n"
+ " std 5,48(%0)\n"
+ " std 6,56(%0)\n"
+ " std 7,64(%0)\n"
+ " std 8,72(%0)\n"
+ " std 9,80(%0)\n"
+ " std 10,88(%0)\n"
+ " std 11,96(%0)\n"
+ " std 12,104(%0)\n"
+ " std 13,112(%0)\n"
+ " std 14,120(%0)\n"
+ " std 15,128(%0)\n"
+ : : "a" (fpregs) : "memory" );
+}
+
+static inline void restore_fp_regs(s390_fp_regs *fpregs)
+{
+ asm volatile (
+ " lfpc 0(%0)\n"
+ " ld 0,8(%0)\n"
+ " ld 1,16(%0)\n"
+ " ld 2,24(%0)\n"
+ " ld 3,32(%0)\n"
+ " ld 4,40(%0)\n"
+ " ld 5,48(%0)\n"
+ " ld 6,56(%0)\n"
+ " ld 7,64(%0)\n"
+ " ld 8,72(%0)\n"
+ " ld 9,80(%0)\n"
+ " ld 10,88(%0)\n"
+ " ld 11,96(%0)\n"
+ " ld 12,104(%0)\n"
+ " ld 13,112(%0)\n"
+ " ld 14,120(%0)\n"
+ " ld 15,128(%0)\n"
+ : : "a" (fpregs));
+}
+
+#define switch_to(prev,next,last) do { \
if (prev == next) \
break; \
save_fp_regs(&prev->thread.fp_regs); \
@@ -26,8 +77,6 @@
resume(prev,next); \
} while (0)
-struct task_struct;
-
#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) \
@@ -39,82 +88,142 @@ extern void __misaligned_u64(void);
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
+ unsigned long addr, old;
+ int shift;
+
switch (size) {
- case 1:
- asm volatile (
- " lghi 1,3\n"
- " nr 1,%0\n" /* isolate last 2 bits */
- " xr %0,1\n" /* align ptr */
- " bras 2,0f\n"
- " icm 1,8,7(%1)\n" /* for ptr&3 == 0 */
- " stcm 0,8,7(%1)\n"
- " icm 1,4,7(%1)\n" /* for ptr&3 == 1 */
- " stcm 0,4,7(%1)\n"
- " icm 1,2,7(%1)\n" /* for ptr&3 == 2 */
- " stcm 0,2,7(%1)\n"
- " icm 1,1,7(%1)\n" /* for ptr&3 == 3 */
- " stcm 0,1,7(%1)\n"
- "0: sll 1,3\n"
- " la 2,0(1,2)\n" /* r2 points to an icm */
- " l 0,0(%0)\n" /* get fullword */
- "1: lr 1,0\n" /* cs loop */
- " ex 0,0(2)\n" /* insert x */
- " cs 0,1,0(%0)\n"
- " jl 1b\n"
- " ex 0,4(2)" /* store *ptr to x */
- : "+&a" (ptr) : "a" (&x)
- : "memory", "cc", "0", "1", "2");
- break;
- case 2:
- if(((addr_t)ptr)&1)
- __misaligned_u16();
- asm volatile (
- " lghi 1,2\n"
- " nr 1,%0\n" /* isolate bit 2^1 */
- " xr %0,1\n" /* align ptr */
- " bras 2,0f\n"
- " icm 1,12,6(%1)\n" /* for ptr&2 == 0 */
- " stcm 0,12,6(%1)\n"
- " icm 1,3,2(%1)\n" /* for ptr&2 == 1 */
- " stcm 0,3,2(%1)\n"
- "0: sll 1,2\n"
- " la 2,0(1,2)\n" /* r2 points to an icm */
- " l 0,0(%0)\n" /* get fullword */
- "1: lr 1,0\n" /* cs loop */
- " ex 0,0(2)\n" /* insert x */
- " cs 0,1,0(%0)\n"
- " jl 1b\n"
- " ex 0,4(2)" /* store *ptr to x */
- : "+&a" (ptr) : "a" (&x)
- : "memory", "cc", "0", "1", "2");
- break;
- case 4:
- if(((addr_t)ptr)&3)
- __misaligned_u32();
- asm volatile (
- " l 0,0(%1)\n"
- "0: cs 0,%0,0(%1)\n"
- " jl 0b\n"
- " lgfr %0,0\n"
- : "+d" (x) : "a" (ptr)
- : "memory", "cc", "0" );
- break;
- case 8:
- if(((addr_t)ptr)&7)
- __misaligned_u64();
- asm volatile (
- " lg 0,0(%1)\n"
- "0: csg 0,%0,0(%1)\n"
- " jl 0b\n"
- " lgr %0,0\n"
- : "+d" (x) : "a" (ptr)
- : "memory", "cc", "0" );
- break;
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,0(%3)\n"
+ "0: lr 0,%0\n"
+ " nr 0,%2\n"
+ " or 0,%1\n"
+ " cs %0,0,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old)
+ : "d" (x << shift), "d" (~(255 << shift)), "a" (addr)
+ : "memory", "cc", "0" );
+ x = old >> shift;
+ break;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,0(%3)\n"
+ "0: lr 0,%0\n"
+ " nr 0,%2\n"
+ " or 0,%1\n"
+ " cs %0,0,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old)
+ : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr)
+ : "memory", "cc", "0" );
+ x = old >> shift;
+ break;
+ case 4:
+ asm volatile (
+ " l %0,0(%2)\n"
+ "0: cs %0,%1,0(%2)\n"
+ " jl 0b\n"
+ : "=&d" (old) : "d" (x), "a" (ptr)
+ : "memory", "cc", "0" );
+ x = old;
+ break;
+ case 8:
+ asm volatile (
+ " lg %0,0(%2)\n"
+ "0: csg %0,%1,0(%2)\n"
+ " jl 0b\n"
+ : "=&d" (old) : "d" (x), "a" (ptr)
+ : "memory", "cc", "0" );
+ x = old;
+ break;
}
return x;
}
/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ unsigned long addr, prev, tmp;
+ int shift;
+
+ switch (size) {
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,0(%4)\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%2\n"
+ " or %1,%3\n"
+ " cs %0,%1,0(%4)\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp)
+ : "d" (old << shift), "d" (new << shift), "a" (ptr),
+ "d" (~(255 << shift))
+ : "memory", "cc" );
+ return prev >> shift;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,0(%4)\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%2\n"
+ " or %1,%3\n"
+ " cs %0,%1,0(%4)\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp)
+ : "d" (old << shift), "d" (new << shift), "a" (ptr),
+ "d" (~(65535 << shift))
+ : "memory", "cc" );
+ return prev >> shift;
+ case 4:
+ asm volatile (
+ " cs %0,%2,0(%3)\n"
+ : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
+ : "memory", "cc" );
+ return prev;
+ case 8:
+ asm volatile (
+ " csg %0,%2,0(%3)\n"
+ : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
+ : "memory", "cc" );
+ return prev;
+ }
+ return old;
+}
+
+/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
@@ -142,21 +251,28 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define local_irq_enable() ({ \
unsigned long __dummy; \
__asm__ __volatile__ ( \
- "stosm 0(%0),0x03" : : "a" (&__dummy) : "memory"); \
+ "stosm 0(%1),0x03" : "=m" (__dummy) : "a" (&__dummy) ); \
})
#define local_irq_disable() ({ \
unsigned long __flags; \
__asm__ __volatile__ ( \
- "stnsm 0(%0),0xFC" : : "a" (&__flags) : "memory"); \
+ "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
__flags; \
})
#define local_save_flags(x) \
- __asm__ __volatile__("stosm 0(%0),0" : : "a" (&x) : "memory")
+ __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x) )
#define local_irq_restore(x) \
- __asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
+ __asm__ __volatile__("ssm 0(%0)" : : "a" (&x) )
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ !((flags >> 56) & 3); \
+})
#define __load_psw(psw) \
__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw) : "cc" );
@@ -220,16 +336,6 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#ifdef CONFIG_SMP
-extern void __global_cli(void);
-extern void __global_sti(void);
-
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
@@ -237,32 +343,16 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#else
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-#define save_flags(x) local_save_flags(x)
-#define restore_flags(x) local_irq_restore(x)
-
#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif
-
-#ifdef __KERNEL__
-extern struct task_struct *resume(void *, void *);
-
-extern int save_fp_regs1(s390_fp_regs *fpregs);
-extern void save_fp_regs(s390_fp_regs *fpregs);
-extern int restore_fp_regs1(s390_fp_regs *fpregs);
-extern void restore_fp_regs(s390_fp_regs *fpregs);
+#endif /* CONFIG_SMP */
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
-#endif
+#endif /* __KERNEL__ */
#endif
-
-
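The new cmpxchg() gives the kernel a uniform compare-and-swap for 1, 2, 4 and 8 byte objects; sub-word sizes are emulated with cs on the containing aligned word, as the shift/mask asm above shows. Typical use, as a sketch:

static inline unsigned long atomic_inc_sketch(volatile unsigned long *p)
{
	unsigned long old;

	do {
		old = *p;
	} while (cmpxchg(p, old, old + 1) != old);
	return old + 1;
}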
diff --git a/include/asm-s390x/thread_info.h b/include/asm-s390x/thread_info.h
index 788dc05c66ab..788c7e6654ce 100644
--- a/include/asm-s390x/thread_info.h
+++ b/include/asm-s390x/thread_info.h
@@ -25,11 +25,9 @@ struct thread_info {
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned int cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ unsigned int preempt_count; /* 0 => preemptable */
};
-#define PREEMPT_ACTIVE 0x4000000
-
/*
* macros/functions for gaining access to the thread information structure
*/
@@ -84,4 +82,6 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __KERNEL__ */
+#define PREEMPT_ACTIVE 0x4000000
+
#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-s390x/tlbflush.h b/include/asm-s390x/tlbflush.h
index d064a59b0678..77b6f6dd7f21 100644
--- a/include/asm-s390x/tlbflush.h
+++ b/include/asm-s390x/tlbflush.h
@@ -88,8 +88,7 @@ static inline void global_flush_tlb(void)
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
- if ((smp_num_cpus > 1) &&
- ((atomic_read(&mm->mm_count) != 1) ||
+ if (((atomic_read(&mm->mm_count) != 1) ||
(mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
global_flush_tlb();
diff --git a/include/asm-s390x/unistd.h b/include/asm-s390x/unistd.h
index cec7c863e072..dc8edd478cd5 100644
--- a/include/asm-s390x/unistd.h
+++ b/include/asm-s390x/unistd.h
@@ -198,6 +198,16 @@
#define __NR_futex 238
#define __NR_sched_setaffinity 239
#define __NR_sched_getaffinity 240
+#define __NR_security 241 /* syscall for security modules */
+/*
+ * Number 242 is reserved for tux
+ */
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */