Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Makefile                   |   1
-rw-r--r--  arch/s390/hypfs/hypfs_sprp.c         |   2
-rw-r--r--  arch/s390/include/asm/ap.h           |  18
-rw-r--r--  arch/s390/include/asm/atomic_ops.h   |  28
-rw-r--r--  arch/s390/include/asm/barrier.h      |   8
-rw-r--r--  arch/s390/include/asm/bitops.h       |   2
-rw-r--r--  arch/s390/include/asm/checksum.h     |   2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h      |  12
-rw-r--r--  arch/s390/include/asm/cpacf.h        |  24
-rw-r--r--  arch/s390/include/asm/ctlreg.h       |   8
-rw-r--r--  arch/s390/include/asm/fpu-insn.h     |  36
-rw-r--r--  arch/s390/include/asm/kvm_host.h     |   2
-rw-r--r--  arch/s390/include/asm/kvm_para.h     |   2
-rw-r--r--  arch/s390/include/asm/pci.h          |  10
-rw-r--r--  arch/s390/include/asm/percpu.h       |   8
-rw-r--r--  arch/s390/include/asm/processor.h    |   2
-rw-r--r--  arch/s390/include/asm/rwonce.h       |   2
-rw-r--r--  arch/s390/include/asm/spinlock.h     |   2
-rw-r--r--  arch/s390/include/asm/stacktrace.h   |   4
-rw-r--r--  arch/s390/include/asm/string.h       |   2
-rw-r--r--  arch/s390/include/asm/syscall.h      |   2
-rw-r--r--  arch/s390/include/asm/timex.h        |   2
-rw-r--r--  arch/s390/kernel/diag/diag310.c      |   2
-rw-r--r--  arch/s390/kernel/diag/diag324.c      |   2
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c   | 106
-rw-r--r--  arch/s390/kernel/setup.c             |   2
-rw-r--r--  arch/s390/kernel/skey.c              |   2
-rw-r--r--  arch/s390/kernel/smp.c               |   2
-rw-r--r--  arch/s390/kernel/uv.c                |   4
-rw-r--r--  arch/s390/kvm/kvm-s390.c             |   6
-rw-r--r--  arch/s390/kvm/priv.c                 |   8
-rw-r--r--  arch/s390/lib/spinlock.c             |   6
-rw-r--r--  arch/s390/lib/string.c               |   8
-rw-r--r--  arch/s390/lib/test_unwind.c          |   4
-rw-r--r--  arch/s390/lib/xor.c                  |   8
-rw-r--r--  arch/s390/mm/maccess.c               |   2
-rw-r--r--  arch/s390/mm/pgalloc.c               |   2
-rw-r--r--  arch/s390/pci/pci.c                  |   4
-rw-r--r--  arch/s390/pci/pci_event.c            |   3
-rw-r--r--  arch/s390/pci/pci_insn.c             |   4
-rw-r--r--  arch/s390/pci/pci_sysfs.c            |  25
41 files changed, 207 insertions(+), 172 deletions(-)
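
Note: nearly all of the hunks below apply the same mechanical cleanup: the trailing "\n" is dropped from the last instruction (or label) of an inline assembly template, since the compiler terminates the template anyway and a trailing "\n" only leaves an extra blank line in the generated assembler output. A minimal before/after sketch of the pattern (illustrative only; the function and operand names are not taken from this patch):

    static inline unsigned long read_long(unsigned long *ptr)
    {
            unsigned long val;

            /* old style: the template ends with "\n" and leaves an
             * empty line after the final instruction
             */
            asm volatile("  lg      %[val],%[src]\n"
                         : [val] "=d" (val) : [src] "RT" (*ptr));

            /* new style: no newline after the last instruction */
            asm volatile("  lg      %[val],%[src]"
                         : [val] "=d" (val) : [src] "RT" (*ptr));
            return val;
    }
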
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7679bc16b692..b4769241332b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -25,6 +25,7 @@ endif
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
+KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index 9fc3f0dae8f0..a2952ed5518b 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -27,7 +27,7 @@ static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
union register_pair r1 = { .even = virt_to_phys(data), };
- asm volatile("diag %[r1],%[r3],0x304\n"
+ asm volatile("diag %[r1],%[r3],0x304"
: [r1] "+&d" (r1.pair)
: [r3] "d" (cmd)
: "memory");
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 352108727d7e..56817990c73d 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -143,7 +143,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid,
" lghi 2,0\n" /* 0 into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- " lgr %[reg2],2\n" /* gr2 into reg2 */
+ " lgr %[reg2],2" /* gr2 into reg2 */
: [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
@@ -186,7 +186,7 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
@@ -211,7 +211,7 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
@@ -315,7 +315,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
" lgr 1,%[reg1]\n" /* irq ctrl into gr1 */
" lgr 2,%[reg2]\n" /* ni addr into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "+&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "memory", "0", "1", "2");
@@ -363,7 +363,7 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
" lgr 1,%[reg1]\n" /* qact in info into gr1 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- " lgr %[reg2],2\n" /* qact out info into reg2 */
+ " lgr %[reg2],2" /* qact out info into reg2 */
: [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2)
: [reg0] "d" (reg0)
: "cc", "0", "1", "2");
@@ -388,7 +388,7 @@ static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0)
: "cc", "0", "1");
@@ -416,7 +416,7 @@ static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" lgr 2,%[reg2]\n" /* secret index into gr2 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg1] "=&d" (reg1.value)
: [reg0] "d" (reg0), [reg2] "d" (reg2)
: "cc", "0", "1", "2");
@@ -453,7 +453,7 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
" lgr 0,%[reg0]\n" /* qid param in gr0 */
"0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
" brc 2,0b\n" /* handle partial completion */
- " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ " lgr %[reg1],1" /* gr1 (status) into reg1 */
: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
[nqap_r2] "+&d" (nqap_r2.pair)
: [nqap_r1] "d" (nqap_r1.pair)
@@ -518,7 +518,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
" brc 6,0b\n" /* handle partial complete */
"2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */
" lgr %[reg1],1\n" /* gr1 (status) into reg1 */
- " lgr %[reg2],2\n" /* gr2 (res length) into reg2 */
+ " lgr %[reg2],2" /* gr2 (res length) into reg2 */
: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
[reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair),
[rp2] "+&d" (rp2.pair)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 21c26d842832..845b77864412 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -17,7 +17,7 @@ static __always_inline int __atomic_read(const int *ptr)
int val;
asm volatile(
- " l %[val],%[ptr]\n"
+ " l %[val],%[ptr]"
: [val] "=d" (val) : [ptr] "R" (*ptr));
return val;
}
@@ -26,11 +26,11 @@ static __always_inline void __atomic_set(int *ptr, int val)
{
if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
asm volatile(
- " mvhi %[ptr],%[val]\n"
+ " mvhi %[ptr],%[val]"
: [ptr] "=Q" (*ptr) : [val] "K" (val));
} else {
asm volatile(
- " st %[val],%[ptr]\n"
+ " st %[val],%[ptr]"
: [ptr] "=R" (*ptr) : [val] "d" (val));
}
}
@@ -40,7 +40,7 @@ static __always_inline long __atomic64_read(const long *ptr)
long val;
asm volatile(
- " lg %[val],%[ptr]\n"
+ " lg %[val],%[ptr]"
: [val] "=d" (val) : [ptr] "RT" (*ptr));
return val;
}
@@ -49,11 +49,11 @@ static __always_inline void __atomic64_set(long *ptr, long val)
{
if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
asm volatile(
- " mvghi %[ptr],%[val]\n"
+ " mvghi %[ptr],%[val]"
: [ptr] "=Q" (*ptr) : [val] "K" (val));
} else {
asm volatile(
- " stg %[val],%[ptr]\n"
+ " stg %[val],%[ptr]"
: [ptr] "=RT" (*ptr) : [val] "d" (val));
}
}
@@ -66,7 +66,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr) \
op_type old; \
\
asm volatile( \
- op_string " %[old],%[val],%[ptr]\n" \
+ op_string " %[old],%[val],%[ptr]" \
op_barrier \
: [old] "=d" (old), [ptr] "+QS" (*ptr) \
: [val] "d" (val) : "cc", "memory"); \
@@ -75,7 +75,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr) \
#define __ATOMIC_OPS(op_name, op_type, op_string) \
__ATOMIC_OP(op_name, op_type, op_string, "") \
- __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+ __ATOMIC_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
@@ -94,14 +94,14 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
static __always_inline void op_name(op_type val, op_type *ptr) \
{ \
asm volatile( \
- op_string " %[ptr],%[val]\n" \
+ op_string " %[ptr],%[val]" \
op_barrier \
: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}
#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
__ATOMIC_CONST_OP(op_name, op_type, op_string, "") \
- __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+ __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
@@ -179,7 +179,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
int cc; \
\
asm volatile( \
- op_string " %[tmp],%[val],%[ptr]\n" \
+ op_string " %[tmp],%[val],%[ptr]" \
op_barrier \
: "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \
: [val] "d" (val) \
@@ -189,7 +189,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
#define __ATOMIC_TEST_OPS(op_name, op_type, op_string) \
__ATOMIC_TEST_OP(op_name, op_type, op_string, "") \
- __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+ __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
__ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal")
__ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg")
@@ -203,7 +203,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
int cc; \
\
asm volatile( \
- op_string " %[ptr],%[val]\n" \
+ op_string " %[ptr],%[val]" \
op_barrier \
: "=@cc" (cc), [ptr] "+QS" (*ptr) \
: [val] "i" (val) \
@@ -213,7 +213,7 @@ static __always_inline bool op_name(op_type val, op_type *ptr) \
#define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string) \
__ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "") \
- __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+ __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
__ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi")
__ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
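
In atomic_ops.h the separating newline moves into the optional barrier string ("bcr 14,0\n" becomes "\nbcr 14,0"): the plain variants now expand to a template that ends directly after the instruction, while the _barrier variants still expand to two properly separated instructions. A simplified sketch of that expansion (macro and function names are invented for illustration):

    #define SKETCH_ATOMIC_OP(op_name, op_string, op_barrier)        \
    static inline int op_name(int val, int *ptr)                    \
    {                                                               \
            int old;                                                \
                                                                    \
            asm volatile(                                           \
                    op_string " %[old],%[val],%[ptr]"               \
                    op_barrier                                      \
                    : [old] "=d" (old), [ptr] "+QS" (*ptr)          \
                    : [val] "d" (val) : "cc", "memory");            \
            return old;                                             \
    }

    /* template: "laa %[old],%[val],%[ptr]" - one instruction, no trailing newline */
    SKETCH_ATOMIC_OP(sketch_add, "laa", "")
    /* template: "laa %[old],%[val],%[ptr]\nbcr 14,0" - two separated instructions */
    SKETCH_ATOMIC_OP(sketch_add_barrier, "laa", "\nbcr 14,0")
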
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d82130d7f2b6..f3184073e754 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -18,9 +18,9 @@
#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
-#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 14,0"
#else
-#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 15,0"
#endif
static __always_inline void bcr_serialize(void)
@@ -69,12 +69,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
if (__builtin_constant_p(size) && size > 0) {
asm(" clgr %2,%1\n"
- " slbgr %0,%0\n"
+ " slbgr %0,%0"
:"=d" (mask) : "d" (size-1), "d" (index) :"cc");
return mask;
}
asm(" clgr %1,%2\n"
- " slbgr %0,%0\n"
+ " slbgr %0,%0"
:"=d" (mask) : "d" (size), "d" (index) :"cc");
return ~mask;
}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index ec945fb60c02..5f10074665b0 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -62,7 +62,7 @@ static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsig
addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
mask = 1UL << (nr & (BITS_PER_BYTE - 1));
asm volatile(
- " tm %[addr],%[mask]\n"
+ " tm %[addr],%[mask]"
: "=@cc" (cc)
: [addr] "Q" (*addr), [mask] "I" (mask)
);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d86dea5900e7..7e83dc2d3b06 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -27,7 +27,7 @@ static inline __wsum cksm(const void *buff, int len, __wsum sum)
kmsan_check_memory(buff, len);
asm volatile(
"0: cksm %[sum],%[rp]\n"
- " jo 0b\n"
+ " jo 0b"
: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index a9e2006033b7..008357996262 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -18,7 +18,7 @@ void __cmpxchg_called_with_bad_pointer(void);
static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
{
asm volatile(
- " cs %[old],%[new],%[ptr]\n"
+ " cs %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
: [new] "d" (new)
: "memory", "cc");
@@ -28,7 +28,7 @@ static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
{
asm volatile(
- " csg %[old],%[new],%[ptr]\n"
+ " csg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
: [new] "d" (new)
: "memory", "cc");
@@ -126,7 +126,7 @@ static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
} \
case 4: { \
asm volatile( \
- " cs %[__old],%[__new],%[__ptr]\n" \
+ " cs %[__old],%[__new],%[__ptr]" \
: [__old] "+d" (*__oldp), \
[__ptr] "+Q" (*(ptr)), \
"=@cc" (__cc) \
@@ -136,7 +136,7 @@ static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
} \
case 8: { \
asm volatile( \
- " csg %[__old],%[__new],%[__ptr]\n" \
+ " csg %[__old],%[__new],%[__ptr]" \
: [__old] "+d" (*__oldp), \
[__ptr] "+QS" (*(ptr)), \
"=@cc" (__cc) \
@@ -241,7 +241,7 @@ static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
{
asm volatile(
- " cdsg %[old],%[new],%[ptr]\n"
+ " cdsg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+QS" (*ptr)
: [new] "d" (new)
: "memory", "cc");
@@ -258,7 +258,7 @@ static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp,
int cc;
asm volatile(
- " cdsg %[old],%[new],%[ptr]\n"
+ " cdsg %[old],%[new],%[ptr]"
: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
: [new] "d" (new)
: "memory");
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 4bc5317fbb12..a83683169d98 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -229,7 +229,7 @@ static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
asm volatile(
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
- " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
+ " .insn rre,%[opc] << 16,%[r1],%[r2]"
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)
@@ -242,7 +242,7 @@ static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
asm volatile(
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
- " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
+ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]"
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
[r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
@@ -416,7 +416,7 @@ static inline int cpacf_km(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KM)
@@ -448,7 +448,7 @@ static inline int cpacf_kmc(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMC)
@@ -476,7 +476,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
[opc] "i" (CPACF_KIMD)
@@ -501,7 +501,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KLMD)
@@ -530,7 +530,7 @@ static inline int _cpacf_kmac(unsigned long *gr0, void *param,
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
- " lgr %[r0],0\n"
+ " lgr %[r0],0"
: [r0] "+d" (*gr0), [src] "+&d" (s.pair)
: [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
@@ -580,7 +580,7 @@ static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
[ctr] "+&d" (c.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -614,7 +614,7 @@ static inline void cpacf_prno(unsigned long func, void *param,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,%[dst],%[seed]\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [dst] "+&d" (d.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)
@@ -640,7 +640,7 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
asm volatile (
" lghi 0,%[fc]\n"
"0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
: [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
: "cc", "memory", "0");
@@ -692,7 +692,7 @@ static inline void cpacf_pckmo(long func, void *param)
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
+ " .insn rre,%[opc] << 16,0,0" /* PCKMO opcode */
:
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_PCKMO)
@@ -725,7 +725,7 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
- " brc 1,0b\n" /* handle partial completion */
+ " brc 1,0b" /* handle partial completion */
: [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
[aad] "+&d" (a.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index e93cc240a1ed..1765a0320933 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -100,7 +100,7 @@ struct ctlreg {
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
- " lctlg %[_low],%[_high],%[_arr]\n" \
+ " lctlg %[_low],%[_high],%[_arr]" \
: \
: [_arr] "Q" (*(struct addrtype *)(&array)), \
[_low] "i" (low), [_high] "i" (high) \
@@ -119,7 +119,7 @@ struct ctlreg {
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
- " stctg %[_low],%[_high],%[_arr]\n" \
+ " stctg %[_low],%[_high],%[_arr]" \
: [_arr] "=Q" (*(struct addrtype *)(&array)) \
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)
@@ -127,7 +127,7 @@ struct ctlreg {
static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
- " lctlg %[cr],%[cr],%[reg]\n"
+ " lctlg %[cr],%[cr],%[reg]"
:
: [reg] "Q" (*reg), [cr] "i" (cr)
: "memory");
@@ -136,7 +136,7 @@ static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
- " stctg %[cr],%[cr],%[reg]\n"
+ " stctg %[cr],%[cr],%[reg]"
: [reg] "=Q" (*reg)
: [cr] "i" (cr));
}
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index 135bb89c0a89..e99f8bca8e08 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -38,7 +38,7 @@ asm(".include \"asm/fpu-insn-asm.h\"\n");
static __always_inline void fpu_cefbr(u8 f1, s32 val)
{
- asm volatile("cefbr %[f1],%[val]\n"
+ asm volatile("cefbr %[f1],%[val]"
:
: [f1] "I" (f1), [val] "d" (val)
: "memory");
@@ -48,7 +48,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
{
unsigned long val;
- asm volatile("cgebr %[val],%[mode],%[f2]\n"
+ asm volatile("cgebr %[val],%[mode],%[f2]"
: [val] "=d" (val)
: [f2] "I" (f2), [mode] "I" (mode)
: "memory");
@@ -57,7 +57,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
static __always_inline void fpu_debr(u8 f1, u8 f2)
{
- asm volatile("debr %[f1],%[f2]\n"
+ asm volatile("debr %[f1],%[f2]"
:
: [f1] "I" (f1), [f2] "I" (f2)
: "memory");
@@ -66,7 +66,7 @@ static __always_inline void fpu_debr(u8 f1, u8 f2)
static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
{
instrument_read(reg, sizeof(*reg));
- asm volatile("ld %[fpr],%[reg]\n"
+ asm volatile("ld %[fpr],%[reg]"
:
: [fpr] "I" (fpr), [reg] "Q" (reg->ui)
: "memory");
@@ -74,7 +74,7 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
static __always_inline void fpu_ldgr(u8 f1, u32 val)
{
- asm volatile("ldgr %[f1],%[val]\n"
+ asm volatile("ldgr %[f1],%[val]"
:
: [f1] "I" (f1), [val] "d" (val)
: "memory");
@@ -113,7 +113,7 @@ static inline void fpu_lfpc_safe(unsigned int *fpc)
static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
{
instrument_write(reg, sizeof(*reg));
- asm volatile("std %[fpr],%[reg]\n"
+ asm volatile("std %[fpr],%[reg]"
: [reg] "=Q" (reg->ui)
: [fpr] "I" (fpr)
: "memory");
@@ -181,7 +181,7 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
instrument_read(vxr, sizeof(__vector128));
- asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
+ asm volatile("VL %[v1],%O[vxr],,%R[vxr]"
:
: [vxr] "Q" (*(__vector128 *)vxr),
[v1] "I" (v1)
@@ -195,7 +195,7 @@ static __always_inline void fpu_vl(u8 v1, const void *vxr)
instrument_read(vxr, sizeof(__vector128));
asm volatile(
" la 1,%[vxr]\n"
- " VL %[v1],0,,1\n"
+ " VL %[v1],0,,1"
:
: [vxr] "R" (*(__vector128 *)vxr),
[v1] "I" (v1)
@@ -239,7 +239,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
size = min(index + 1, sizeof(__vector128));
instrument_read(vxr, size);
- asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]"
:
: [vxr] "Q" (*(u8 *)vxr),
[index] "d" (index),
@@ -257,7 +257,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
instrument_read(vxr, size);
asm volatile(
" la 1,%[vxr]\n"
- " VLL %[v1],%[index],0,1\n"
+ " VLL %[v1],%[index],0,1"
:
: [vxr] "R" (*(u8 *)vxr),
[index] "d" (index),
@@ -277,7 +277,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
- asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]" \
: \
: [vxrs] "Q" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
@@ -297,7 +297,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
instrument_read(_v, size); \
asm volatile( \
" la 1,%[vxrs]\n" \
- " VLM %[v1],%[v3],0,1\n" \
+ " VLM %[v1],%[v3],0,1" \
: \
: [vxrs] "R" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
@@ -360,7 +360,7 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
instrument_write(vxr, sizeof(__vector128));
- asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
+ asm volatile("VST %[v1],%O[vxr],,%R[vxr]"
: [vxr] "=Q" (*(__vector128 *)vxr)
: [v1] "I" (v1)
: "memory");
@@ -373,7 +373,7 @@ static __always_inline void fpu_vst(u8 v1, const void *vxr)
instrument_write(vxr, sizeof(__vector128));
asm volatile(
" la 1,%[vxr]\n"
- " VST %[v1],0,,1\n"
+ " VST %[v1],0,,1"
: [vxr] "=R" (*(__vector128 *)vxr)
: [v1] "I" (v1)
: "memory", "1");
@@ -389,7 +389,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
size = min(index + 1, sizeof(__vector128));
instrument_write(vxr, size);
- asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]"
: [vxr] "=Q" (*(u8 *)vxr)
: [index] "d" (index), [v1] "I" (v1)
: "memory");
@@ -405,7 +405,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
instrument_write(vxr, size);
asm volatile(
" la 1,%[vxr]\n"
- " VSTL %[v1],%[index],0,1\n"
+ " VSTL %[v1],%[index],0,1"
: [vxr] "=R" (*(u8 *)vxr)
: [index] "d" (index), [v1] "I" (v1)
: "memory", "1");
@@ -423,7 +423,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
- asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]" \
: [vxrs] "=Q" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
@@ -442,7 +442,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
instrument_write(_v, size); \
asm volatile( \
" la 1,%[vxrs]\n" \
- " VSTM %[v1],%[v3],0,1\n" \
+ " VSTM %[v1],%[v3],0,1" \
: [vxrs] "=R" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory", "1"); \
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95d15416c39d..c2ba3d4398c5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -722,6 +722,8 @@ extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa);
+
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index df73a052760c..00cc8c916cfb 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -76,7 +76,7 @@ long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \
HYPERCALL_REGS_##args; \
\
asm volatile ( \
- " diag 2,4,0x500\n" \
+ " diag 2,4,0x500" \
: "=d" (__rc) \
: "d" (__nr) HYPERCALL_FMT_##args \
: "memory", "cc"); \
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 41f900f693d9..6890925d5587 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -246,6 +246,16 @@ int clp_refresh_fh(u32 fid, u32 *fh);
/* UID */
void update_uid_checking(bool new);
+/* Firmware Sysfs */
+int __init __zpci_fw_sysfs_init(void);
+
+static inline int __init zpci_fw_sysfs_init(void)
+{
+ if (IS_ENABLED(CONFIG_SYSFS))
+ return __zpci_fw_sysfs_init();
+ return 0;
+}
+
/* IOMMU Interface */
int zpci_init_iommu(struct zpci_dev *zdev);
void zpci_destroy_iommu(struct zpci_dev *zdev);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 96af7d964014..965886dfe954 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -73,13 +73,13 @@
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
asm volatile( \
- op2 " %[ptr__],%[val__]\n" \
+ op2 " %[ptr__],%[val__]" \
: [ptr__] "+Q" (*ptr__) \
: [val__] "i" ((szcast)val__) \
: "cc"); \
} else { \
asm volatile( \
- op1 " %[old__],%[val__],%[ptr__]\n" \
+ op1 " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
@@ -98,7 +98,7 @@
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
- op " %[old__],%[val__],%[ptr__]\n" \
+ op " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
@@ -117,7 +117,7 @@
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
- op " %[old__],%[val__],%[ptr__]\n" \
+ op " %[old__],%[val__],%[ptr__]" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6a9c08b80eda..93e1034485d7 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -163,7 +163,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
"2: stg %[poison],0(%[addr])\n"
" j 4f\n"
"3: mvc 8(1,%[addr]),0(%[addr])\n"
- "4:\n"
+ "4:"
: [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
: [poison] "d" (poison)
: "memory", "cc"
diff --git a/arch/s390/include/asm/rwonce.h b/arch/s390/include/asm/rwonce.h
index 91fc24520e82..402325ec20f0 100644
--- a/arch/s390/include/asm/rwonce.h
+++ b/arch/s390/include/asm/rwonce.h
@@ -19,7 +19,7 @@
\
BUILD_BUG_ON(sizeof(x) != 16); \
asm volatile( \
- " lpq %[val],%[_x]\n" \
+ " lpq %[val],%[_x]" \
: [val] "=d" (__u.val) \
: [_x] "QS" (x) \
: "memory"); \
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f9935db9fd76..b06b183b7246 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -98,7 +98,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
kcsan_release();
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
- " mvhhi %[lock],0\n"
+ " mvhhi %[lock],0"
: [lock] "=Q" (((unsigned short *)&lp->lock)[1])
:
: "memory");
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 1d5ca13dc90f..810a6b9d9628 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -199,7 +199,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
" lg 15,%[_stack]\n" \
" stg %[_frame],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
- " lgr 15,%[_prev]\n" \
+ " lgr 15,%[_prev]" \
: [_prev] "=&d" (prev), CALL_FMT_##nr \
: [_stack] "R" (__stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
@@ -250,7 +250,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
" lra 14,0(1)\n" \
" lpswe %[psw_enter]\n" \
"0: lpswe 0(7)\n" \
- "1:\n" \
+ "1:" \
: CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
: [psw_enter] "Q" (psw_enter) \
: "7", CALL_CLOBBER_##nr); \
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index f8f68f4ef255..238e721e5a22 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -125,7 +125,7 @@ static inline void *memscan(void *s, int c, size_t n)
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
- " jo 0b\n"
+ " jo 0b"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index bd4cb00ccd5e..10ce5c4ccbd6 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -155,7 +155,7 @@ long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr) \
SYSCALL_REGS_##nr; \
\
asm volatile ( \
- " svc 0\n" \
+ " svc 0" \
: "=d" (rc) \
: "d" (r1) SYSCALL_FMT_##nr \
: "memory"); \
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 59dfb8780f62..49447b40f038 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -81,7 +81,7 @@ static inline void set_tod_programmable_field(u16 val)
{
asm volatile(
" lgr 0,%[val]\n"
- " sckpf\n"
+ " sckpf"
:
: [val] "d" ((unsigned long)val)
: "0");
diff --git a/arch/s390/kernel/diag/diag310.c b/arch/s390/kernel/diag/diag310.c
index d6a34454aa5a..f411562aa7f6 100644
--- a/arch/s390/kernel/diag/diag310.c
+++ b/arch/s390/kernel/diag/diag310.c
@@ -66,7 +66,7 @@ static inline unsigned long diag310(unsigned long subcode, unsigned long size, v
union register_pair rp = { .even = (unsigned long)addr, .odd = size };
diag_stat_inc(DIAG_STAT_X310);
- asm volatile("diag %[rp],%[subcode],0x310\n"
+ asm volatile("diag %[rp],%[subcode],0x310"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");
diff --git a/arch/s390/kernel/diag/diag324.c b/arch/s390/kernel/diag/diag324.c
index f0a8b4841fb9..fe325c2a2d0d 100644
--- a/arch/s390/kernel/diag/diag324.c
+++ b/arch/s390/kernel/diag/diag324.c
@@ -101,7 +101,7 @@ static unsigned long diag324(unsigned long subcode, void *addr)
union register_pair rp = { .even = (unsigned long)addr };
diag_stat_inc(DIAG_STAT_X324);
- asm volatile("diag %[rp],%[subcode],0x324\n"
+ asm volatile("diag %[rp],%[subcode],0x324"
: [rp] "+d" (rp.pair)
: [subcode] "d" (subcode)
: "memory");
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 9455f213dc20..62bf8a15bf32 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -80,6 +80,15 @@ static int paicrypt_root_alloc(void)
/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);
+/* Free all memory allocated for event counting/sampling setup */
+static void paicrypt_free(struct paicrypt_mapptr *mp)
+{
+ free_page((unsigned long)mp->mapptr->page);
+ kvfree(mp->mapptr->save);
+ kfree(mp->mapptr);
+ mp->mapptr = NULL;
+}
+
/* Adjust usage counters and remove allocated memory when all users are
* gone.
*/
@@ -93,15 +102,8 @@ static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
"refcnt %u\n", __func__, event->attr.config,
event->cpu, cpump->active_events,
refcount_read(&cpump->refcnt));
- if (refcount_dec_and_test(&cpump->refcnt)) {
- debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
- __func__, (unsigned long)cpump->page,
- cpump->save);
- free_page((unsigned long)cpump->page);
- kvfree(cpump->save);
- kfree(cpump);
- mp->mapptr = NULL;
- }
+ if (refcount_dec_and_test(&cpump->refcnt))
+ paicrypt_free(mp);
paicrypt_root_free();
mutex_unlock(&pai_reserve_mutex);
}
@@ -175,14 +177,13 @@ static u64 paicrypt_getall(struct perf_event *event)
*
* Allocate the memory for the event.
*/
-static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
+static int paicrypt_alloc_cpu(struct perf_event *event, int cpu)
{
struct paicrypt_map *cpump = NULL;
struct paicrypt_mapptr *mp;
int rc;
mutex_lock(&pai_reserve_mutex);
-
/* Allocate root node */
rc = paicrypt_root_alloc();
if (rc)
@@ -192,58 +193,44 @@ static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
cpump = mp->mapptr;
if (!cpump) { /* Paicrypt_map allocated? */
+ rc = -ENOMEM;
cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
- if (!cpump) {
- rc = -ENOMEM;
- goto free_root;
+ if (!cpump)
+ goto undo;
+ /* Allocate memory for counter page and counter extraction.
+ * Only the first counting event has to allocate a page.
+ */
+ mp->mapptr = cpump;
+ cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ cpump->save = kvmalloc_array(paicrypt_cnt + 1,
+ sizeof(struct pai_userdata),
+ GFP_KERNEL);
+ if (!cpump->page || !cpump->save) {
+ paicrypt_free(mp);
+ goto undo;
}
INIT_LIST_HEAD(&cpump->syswide_list);
- }
-
- /* Allocate memory for counter page and counter extraction.
- * Only the first counting event has to allocate a page.
- */
- if (cpump->page) {
+ refcount_set(&cpump->refcnt, 1);
+ rc = 0;
+ } else {
refcount_inc(&cpump->refcnt);
- goto unlock;
}
- rc = -ENOMEM;
- cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!cpump->page)
- goto free_paicrypt_map;
- cpump->save = kvmalloc_array(paicrypt_cnt + 1,
- sizeof(struct pai_userdata), GFP_KERNEL);
- if (!cpump->save) {
- free_page((unsigned long)cpump->page);
- cpump->page = NULL;
- goto free_paicrypt_map;
+undo:
+ if (rc) {
+ /* Error in allocation of event, decrement anchor. Since
+			 * the event is not created, its destroy() function is never
+ * invoked. Adjust the reference counter for the anchor.
+ */
+ paicrypt_root_free();
}
-
- /* Set mode and reference count */
- rc = 0;
- refcount_set(&cpump->refcnt, 1);
- mp->mapptr = cpump;
- debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
- "save %p rc %d\n", __func__, cpump->active_events,
- refcount_read(&cpump->refcnt),
- (unsigned long)cpump->page, cpump->save, rc);
- goto unlock;
-
-free_paicrypt_map:
- /* Undo memory allocation */
- kfree(cpump);
- mp->mapptr = NULL;
-free_root:
- paicrypt_root_free();
unlock:
mutex_unlock(&pai_reserve_mutex);
- return rc ? ERR_PTR(rc) : cpump;
+ return rc;
}
-static int paicrypt_event_init_all(struct perf_event *event)
+static int paicrypt_alloc(struct perf_event *event)
{
- struct paicrypt_map *cpump;
struct cpumask *maskptr;
int cpu, rc = -ENOMEM;
@@ -252,12 +239,11 @@ static int paicrypt_event_init_all(struct perf_event *event)
goto out;
for_each_online_cpu(cpu) {
- cpump = paicrypt_busy(event, cpu);
- if (IS_ERR(cpump)) {
+ rc = paicrypt_alloc_cpu(event, cpu);
+ if (rc) {
for_each_cpu(cpu, maskptr)
paicrypt_event_destroy_cpu(event, cpu);
kfree(maskptr);
- rc = PTR_ERR(cpump);
goto out;
}
cpumask_set_cpu(cpu, maskptr);
@@ -279,7 +265,6 @@ out:
static int paicrypt_event_init(struct perf_event *event)
{
struct perf_event_attr *a = &event->attr;
- struct paicrypt_map *cpump;
int rc = 0;
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
@@ -301,13 +286,10 @@ static int paicrypt_event_init(struct perf_event *event)
}
}
- if (event->cpu >= 0) {
- cpump = paicrypt_busy(event, event->cpu);
- if (IS_ERR(cpump))
- rc = PTR_ERR(cpump);
- } else {
- rc = paicrypt_event_init_all(event);
- }
+ if (event->cpu >= 0)
+ rc = paicrypt_alloc_cpu(event, event->cpu);
+ else
+ rc = paicrypt_alloc(event);
if (rc) {
free_page(PAI_SAVE_AREA(event));
goto out;
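
The perf_pai_crypto.c rework above folds the per-CPU setup into paicrypt_alloc_cpu(): the first counting event on a CPU allocates the paicrypt_map together with its counter page and save area and sets the reference count to 1, later events only take another reference, and the new paicrypt_free() helper releases everything once the last reference is dropped. A reduced sketch of that allocate-or-reference pattern (types and names simplified; locking, the root-node handling and the real save-area sizing are omitted):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct pai_cpu_map {
            unsigned long *page;            /* counter page */
            void *save;                     /* extraction buffer */
            refcount_t refcnt;
    };

    static int pai_cpu_map_get(struct pai_cpu_map **slot)
    {
            struct pai_cpu_map *cpump = *slot;

            if (!cpump) {                           /* first event on this CPU */
                    cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
                    if (!cpump)
                            return -ENOMEM;
                    cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                    cpump->save = kvmalloc(PAGE_SIZE, GFP_KERNEL);  /* size simplified */
                    if (!cpump->page || !cpump->save) {
                            free_page((unsigned long)cpump->page);
                            kvfree(cpump->save);
                            kfree(cpump);
                            return -ENOMEM;
                    }
                    refcount_set(&cpump->refcnt, 1);
                    *slot = cpump;
            } else {                                /* later events share it */
                    refcount_inc(&cpump->refcnt);
            }
            return 0;
    }

    static void pai_cpu_map_put(struct pai_cpu_map **slot)
    {
            struct pai_cpu_map *cpump = *slot;

            if (refcount_dec_and_test(&cpump->refcnt)) {
                    free_page((unsigned long)cpump->page);
                    kvfree(cpump->save);
                    kfree(cpump);
                    *slot = NULL;
            }
    }
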
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b529868789f..892fce2b7549 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -839,7 +839,7 @@ static void __init setup_control_program_code(void)
return;
diag_stat_inc(DIAG_STAT_X318);
- asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
+ asm volatile("diag %0,0,0x318" : : "d" (diag318_info.val));
}
/*
diff --git a/arch/s390/kernel/skey.c b/arch/s390/kernel/skey.c
index ba049fd103c2..cc869de6e3a5 100644
--- a/arch/s390/kernel/skey.c
+++ b/arch/s390/kernel/skey.c
@@ -11,7 +11,7 @@ static inline unsigned long load_real_address(unsigned long address)
unsigned long real;
asm volatile(
- " lra %[real],0(%[address])\n"
+ " lra %[real],0(%[address])"
: [real] "=d" (real)
: [address] "a" (address)
: "cc");
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e88ebe5339fc..da84c0dc6b7e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -340,7 +340,7 @@ static void pcpu_delegate(struct pcpu *pcpu, int cpu,
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
"1: sigp 0,%1,%3 # sigp stop to current cpu\n"
- " brc 2,1b # busy, try again\n"
+ " brc 2,1b # busy, try again"
: : "d" (pcpu->address), "d" (source_cpu),
"K" (SIGP_RESTART), "K" (SIGP_STOP)
: "0", "1", "cc");
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 93b2a01bae40..5d17609bcfe1 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -866,8 +866,8 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
return -ENOENT;
}
-/*
- * Do the actual search for `uv_get_secret_metadata`.
+/**
+ * uv_find_secret() - search secret metadata for a given secret id.
* @secret_id: search pattern.
* @list: ephemeral buffer space
* @secret: output data, containing the secret's metadata.
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6d51aa5f66be..16ba04062854 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -356,7 +356,7 @@ static __always_inline void pfcr_query(u8 (*query)[16])
{
asm volatile(
" lghi 0,0\n"
- " .insn rsy,0xeb0000000016,0,0,%[query]\n"
+ " .insn rsy,0xeb0000000016,0,0,%[query]"
: [query] "=QS" (*query)
:
: "cc", "0");
@@ -368,7 +368,7 @@ static __always_inline void __sortl_query(u8 (*query)[32])
" lghi 0,0\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
- " .insn rre,0xb9380000,2,4\n"
+ " .insn rre,0xb9380000,2,4"
: [query] "=R" (*query)
:
: "cc", "0", "1");
@@ -380,7 +380,7 @@ static __always_inline void __dfltcc_query(u8 (*query)[32])
" lghi 0,0\n"
" la 1,%[query]\n"
/* Parameter registers are ignored */
- " .insn rrf,0xb9390000,2,4,6,0\n"
+ " .insn rrf,0xb9390000,2,4,6,0"
: [query] "=R" (*query)
:
: "cc", "0", "1");
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9253c70897a8..9a71b6e00948 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -605,6 +605,14 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
}
}
+#if IS_ENABLED(CONFIG_VFIO_AP)
+bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
+{
+ return kvm_is_gpa_in_memslot(kvm, gpa);
+}
+EXPORT_SYMBOL_FOR_MODULES(kvm_s390_is_gpa_in_memslot, "vfio_ap");
+#endif
+
/*
* handle_pqap: Handling pqap interception
* @vcpu: the vcpu having issue the pqap instruction
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ad9da4038511..10db1e56a811 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -96,7 +96,7 @@ static inline int arch_load_niai4(int *lock)
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
- " l %[owner],%[lock]\n"
+ " l %[owner],%[lock]"
: [owner] "=d" (owner) : [lock] "R" (*lock) : "memory");
return owner;
}
@@ -109,7 +109,7 @@ static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
- " cs %[old],%[new],%[lock]\n"
+ " cs %[old],%[new],%[lock]"
: [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc)
: [new] "d" (new)
: "memory");
@@ -124,7 +124,7 @@ static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
- " cs %[old],%[new],%[lock]\n"
+ " cs %[old],%[new],%[lock]"
: [old] "+d" (old), [lock] "+Q" (*lock)
: [new] "d" (new)
: "cc", "memory");
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 099de76e8b1a..757f58960198 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -27,7 +27,7 @@ static inline char *__strend(const char *s)
asm volatile(
" lghi 0,0\n"
"0: srst %[e],%[s]\n"
- " jo 0b\n"
+ " jo 0b"
: [e] "+&a" (e), [s] "+&a" (s)
:
: "cc", "memory", "0");
@@ -41,7 +41,7 @@ static inline char *__strnend(const char *s, size_t n)
asm volatile(
" lghi 0,0\n"
"0: srst %[p],%[s]\n"
- " jo 0b\n"
+ " jo 0b"
: [p] "+&d" (p), [s] "+&a" (s)
:
: "cc", "memory", "0");
@@ -95,7 +95,7 @@ char *strcat(char *dest, const char *src)
"0: srst %[dummy],%[dest]\n"
" jo 0b\n"
"1: mvst %[dummy],%[src]\n"
- " jo 1b\n"
+ " jo 1b"
: [dummy] "+&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");
@@ -291,7 +291,7 @@ void *memscan(void *s, int c, size_t n)
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
- " jo 0b\n"
+ " jo 0b"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index 6e42100875e7..6bb3fa5bf925 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -150,7 +150,7 @@ static __always_inline struct pt_regs fake_pt_regs(void)
regs.gprs[15] = current_stack_pointer;
asm volatile(
- "basr %[psw_addr],0\n"
+ "basr %[psw_addr],0"
: [psw_addr] "=d" (regs.psw.addr));
return regs;
}
@@ -232,7 +232,7 @@ static noinline void test_unwind_kprobed_func(void)
asm volatile(
" nopr %%r7\n"
"test_unwind_kprobed_insn:\n"
- " nopr %%r7\n"
+ " nopr %%r7"
:);
}
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index ce7bcf7c0032..1721b73b7803 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -27,7 +27,7 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
"1: exrl %0,2f\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
- "3:\n"
+ "3:"
: : "d" (bytes), "a" (p1), "a" (p2)
: "0", "cc", "memory");
}
@@ -53,7 +53,7 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
" j 4f\n"
"2: xc 0(1,%1),0(%2)\n"
"3: xc 0(1,%1),0(%3)\n"
- "4:\n"
+ "4:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
: : "0", "cc", "memory");
}
@@ -84,7 +84,7 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
"2: xc 0(1,%1),0(%2)\n"
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
- "5:\n"
+ "5:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
: : "0", "cc", "memory");
}
@@ -121,7 +121,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
"5: xc 0(1,%1),0(%5)\n"
- "6:\n"
+ "6:"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (p5)
: : "0", "cc", "memory");
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 44426e0f2944..cfd219fe495c 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -41,7 +41,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
" ex %1,0(1)\n"
" lg %1,0(%3)\n"
" lra %0,0(%0)\n"
- " sturg %1,%0\n"
+ " sturg %1,%0"
: "+&a" (aligned), "+&a" (count), "=m" (tmp)
: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
: "cc", "memory", "1");
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 76d92069799f..626fca116cd7 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -245,7 +245,7 @@ static inline unsigned long base_lra(unsigned long address)
unsigned long real;
asm volatile(
- " lra %0,0(%1)\n"
+ " lra %0,0(%1)"
: "=d" (real) : "a" (address) : "cc");
return real;
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index cd6676c2d602..c82c577db2bc 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -1188,6 +1188,10 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
+ rc = zpci_fw_sysfs_init();
+ if (rc)
+ goto out_find;
+
s390_pci_initialized = 1;
return 0;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index d930416d4c90..b95376041501 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -88,6 +88,7 @@ static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev,
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
ers_res = driver->err_handler->error_detected(pdev, pdev->error_state);
+ pci_uevent_ers(pdev, ers_res);
if (ers_result_indicates_abort(ers_res))
pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev));
else if (ers_res == PCI_ERS_RESULT_NEED_RESET)
@@ -244,6 +245,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
ers_res = PCI_ERS_RESULT_RECOVERED;
if (ers_res != PCI_ERS_RESULT_RECOVERED) {
+ pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
pr_err("%s: Automatic recovery failed; operator intervention is required\n",
pci_name(pdev));
status_str = "failed (driver can't recover)";
@@ -253,6 +255,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
pr_info("%s: The device is ready to resume operations\n", pci_name(pdev));
if (driver->err_handler->resume)
driver->err_handler->resume(pdev);
+ pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
pci_dev_unlock(pdev);
zpci_report_status(zdev, "recovery", status_str);
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index eb978c8012be..35ceb1bea1c6 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -145,7 +145,7 @@ int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
return -EIO;
asm volatile(
- ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
+ ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
return 0;
@@ -442,7 +442,7 @@ EXPORT_SYMBOL_GPL(zpci_write_block);
static inline void __pciwb_mio(void)
{
- asm volatile (".insn rre,0xb9d50000,0,0\n");
+ asm volatile (".insn rre,0xb9d50000,0,0");
}
void zpci_barrier(void)
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 0ee0924cfab7..12060870e2aa 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -41,6 +41,9 @@ zpci_attr(segment1, "0x%02x\n", pfip[1]);
zpci_attr(segment2, "0x%02x\n", pfip[2]);
zpci_attr(segment3, "0x%02x\n", pfip[3]);
+#define ZPCI_FW_ATTR_RO(_name) \
+ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
static ssize_t mio_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -164,6 +167,13 @@ static ssize_t uid_is_unique_show(struct device *dev,
}
static DEVICE_ATTR_RO(uid_is_unique);
+static ssize_t uid_checking_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", zpci_unique_uid ? 1 : 0);
+}
+ZPCI_FW_ATTR_RO(uid_checking);
+
/* analogous to smbios index */
static ssize_t index_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -233,3 +243,18 @@ const struct attribute_group pfip_attr_group = {
.name = "pfip",
.attrs = pfip_attrs,
};
+
+static struct attribute *clp_fw_attrs[] = {
+ &uid_checking_attr.attr,
+ NULL,
+};
+
+static struct attribute_group clp_fw_attr_group = {
+ .name = "clp",
+ .attrs = clp_fw_attrs,
+};
+
+int __init __zpci_fw_sysfs_init(void)
+{
+ return sysfs_create_group(firmware_kobj, &clp_fw_attr_group);
+}