Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile | 12
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 4
-rw-r--r--  arch/s390/kernel/audit.c | 16
-rw-r--r--  arch/s390/kernel/audit.h | 16
-rw-r--r--  arch/s390/kernel/compat_audit.c | 48
-rw-r--r--  arch/s390/kernel/compat_linux.c | 289
-rw-r--r--  arch/s390/kernel/compat_linux.h | 101
-rw-r--r--  arch/s390/kernel/compat_ptrace.h | 64
-rw-r--r--  arch/s390/kernel/compat_signal.c | 420
-rw-r--r--  arch/s390/kernel/cpacf.c | 3
-rw-r--r--  arch/s390/kernel/cpcmd.c | 3
-rw-r--r--  arch/s390/kernel/debug.c | 3
-rw-r--r--  arch/s390/kernel/dis.c | 17
-rw-r--r--  arch/s390/kernel/dumpstack.c | 8
-rw-r--r--  arch/s390/kernel/early.c | 21
-rw-r--r--  arch/s390/kernel/entry.S | 25
-rw-r--r--  arch/s390/kernel/head.S (renamed from arch/s390/kernel/head64.S) | 0
-rw-r--r--  arch/s390/kernel/hiperdispatch.c | 5
-rw-r--r--  arch/s390/kernel/module.c | 21
-rw-r--r--  arch/s390/kernel/nmi.c | 3
-rw-r--r--  arch/s390/kernel/os_info.c | 3
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 6
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 7
-rw-r--r--  arch/s390/kernel/perf_event.c | 4
-rw-r--r--  arch/s390/kernel/perf_pai.c | 1230
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c | 843
-rw-r--r--  arch/s390/kernel/perf_pai_ext.c | 756
-rw-r--r--  arch/s390/kernel/perf_regs.c | 3
-rw-r--r--  arch/s390/kernel/process.c | 9
-rw-r--r--  arch/s390/kernel/processor.c | 3
-rw-r--r--  arch/s390/kernel/ptrace.c | 524
-rw-r--r--  arch/s390/kernel/setup.c | 6
-rw-r--r--  arch/s390/kernel/signal.c | 27
-rw-r--r--  arch/s390/kernel/smp.c | 15
-rw-r--r--  arch/s390/kernel/stackprotector.c | 156
-rw-r--r--  arch/s390/kernel/stacktrace.c | 3
-rw-r--r--  arch/s390/kernel/sthyi.c | 2
-rw-r--r--  arch/s390/kernel/syscall.c | 12
-rw-r--r--  arch/s390/kernel/syscalls/Makefile | 58
-rw-r--r--  arch/s390/kernel/syscalls/syscall.tbl | 857
-rwxr-xr-x  arch/s390/kernel/syscalls/syscalltbl | 232
-rw-r--r--  arch/s390/kernel/sysinfo.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 3
-rw-r--r--  arch/s390/kernel/topology.c | 3
-rw-r--r--  arch/s390/kernel/uprobes.c | 13
-rw-r--r--  arch/s390/kernel/uv.c | 3
-rw-r--r--  arch/s390/kernel/vdso.c | 36
-rw-r--r--  arch/s390/kernel/vdso/.gitignore (renamed from arch/s390/kernel/vdso32/.gitignore) | 2
-rw-r--r--  arch/s390/kernel/vdso/Makefile | 76
-rwxr-xr-x  arch/s390/kernel/vdso/gen_vdso_offsets.sh (renamed from arch/s390/kernel/vdso64/gen_vdso_offsets.sh) | 2
-rw-r--r--  arch/s390/kernel/vdso/getcpu.c (renamed from arch/s390/kernel/vdso64/getcpu.c) | 0
-rw-r--r--  arch/s390/kernel/vdso/note.S (renamed from arch/s390/kernel/vdso32/note.S) | 0
-rw-r--r--  arch/s390/kernel/vdso/vdso.h (renamed from arch/s390/kernel/vdso64/vdso.h) | 6
-rw-r--r--  arch/s390/kernel/vdso/vdso.lds.S (renamed from arch/s390/kernel/vdso64/vdso64.lds.S) | 43
-rw-r--r--  arch/s390/kernel/vdso/vdso_generic.c (renamed from arch/s390/kernel/vdso64/vdso64_generic.c) | 0
-rw-r--r--  arch/s390/kernel/vdso/vdso_user_wrapper.S (renamed from arch/s390/kernel/vdso64/vdso_user_wrapper.S) | 0
-rw-r--r--  arch/s390/kernel/vdso/vdso_wrapper.S (renamed from arch/s390/kernel/vdso32/vdso32_wrapper.S) | 8
-rw-r--r--  arch/s390/kernel/vdso/vgetrandom-chacha.S (renamed from arch/s390/kernel/vdso64/vgetrandom-chacha.S) | 0
-rw-r--r--  arch/s390/kernel/vdso/vgetrandom.c (renamed from arch/s390/kernel/vdso64/vgetrandom.c) | 0
-rw-r--r--  arch/s390/kernel/vdso32/Makefile | 64
-rwxr-xr-x  arch/s390/kernel/vdso32/gen_vdso_offsets.sh | 15
-rw-r--r--  arch/s390/kernel/vdso32/vdso32.lds.S | 140
-rw-r--r--  arch/s390/kernel/vdso32/vdso_user_wrapper.S | 22
-rw-r--r--  arch/s390/kernel/vdso64/.gitignore | 2
-rw-r--r--  arch/s390/kernel/vdso64/Makefile | 79
-rw-r--r--  arch/s390/kernel/vdso64/note.S | 13
-rw-r--r--  arch/s390/kernel/vdso64/vdso64_wrapper.S | 15
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 15
68 files changed, 2011 insertions, 4386 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index eb06ff888314..42c83d60d6fa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -36,7 +36,7 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o
+obj-y := head.o traps.o time.o process.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
@@ -56,9 +56,6 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AUDIT) += audit.o
-compat-obj-$(CONFIG_AUDIT) += compat_audit.o
-obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
-obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KPROBES) += mcount.o
@@ -70,7 +67,7 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-
+obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
obj-$(CONFIG_CERT_STORE) += cert_store.o
@@ -79,10 +76,9 @@ obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
-obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o
+obj-$(CONFIG_PERF_EVENTS) += perf_pai.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
-obj-y += vdso64/
-obj-$(CONFIG_COMPAT) += vdso32/
+obj-y += vdso/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a8915663e917..cfe27f6579e3 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -21,6 +21,9 @@ int main(void)
OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
+#ifdef CONFIG_STACKPROTECTOR
+ OFFSET(__TASK_stack_canary, task_struct, stack_canary);
+#endif
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
@@ -139,6 +142,7 @@ int main(void)
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+ OFFSET(__LC_STACK_CANARY, lowcore, stack_canary);
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
OFFSET(__LC_OS_INFO, lowcore, os_info);
diff --git a/arch/s390/kernel/audit.c b/arch/s390/kernel/audit.c
index 02051a596b87..7897d9411e13 100644
--- a/arch/s390/kernel/audit.c
+++ b/arch/s390/kernel/audit.c
@@ -3,7 +3,6 @@
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
-#include "audit.h"
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
@@ -32,19 +31,11 @@ static unsigned signal_class[] = {
int audit_classify_arch(int arch)
{
-#ifdef CONFIG_COMPAT
- if (arch == AUDIT_ARCH_S390)
- return 1;
-#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
-#ifdef CONFIG_COMPAT
- if (abi == AUDIT_ARCH_S390)
- return s390_classify_syscall(syscall);
-#endif
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
@@ -63,13 +54,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
-#ifdef CONFIG_COMPAT
- audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class);
- audit_register_class(AUDIT_CLASS_READ_32, s390_read_class);
- audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class);
- audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class);
- audit_register_class(AUDIT_CLASS_SIGNAL_32, s390_signal_class);
-#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/arch/s390/kernel/audit.h b/arch/s390/kernel/audit.h
deleted file mode 100644
index 4d4b596412ec..000000000000
--- a/arch/s390/kernel/audit.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_S390_KERNEL_AUDIT_H
-#define __ARCH_S390_KERNEL_AUDIT_H
-
-#include <linux/types.h>
-
-#ifdef CONFIG_COMPAT
-extern int s390_classify_syscall(unsigned);
-extern __u32 s390_dir_class[];
-extern __u32 s390_write_class[];
-extern __u32 s390_read_class[];
-extern __u32 s390_chattr_class[];
-extern __u32 s390_signal_class[];
-#endif /* CONFIG_COMPAT */
-
-#endif /* __ARCH_S390_KERNEL_AUDIT_H */
diff --git a/arch/s390/kernel/compat_audit.c b/arch/s390/kernel/compat_audit.c
deleted file mode 100644
index a7c46e8310f0..000000000000
--- a/arch/s390/kernel/compat_audit.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#undef __s390x__
-#include <linux/audit_arch.h>
-#include <asm/unistd.h>
-#include "audit.h"
-
-unsigned s390_dir_class[] = {
-#include <asm-generic/audit_dir_write.h>
-~0U
-};
-
-unsigned s390_chattr_class[] = {
-#include <asm-generic/audit_change_attr.h>
-~0U
-};
-
-unsigned s390_write_class[] = {
-#include <asm-generic/audit_write.h>
-~0U
-};
-
-unsigned s390_read_class[] = {
-#include <asm-generic/audit_read.h>
-~0U
-};
-
-unsigned s390_signal_class[] = {
-#include <asm-generic/audit_signal.h>
-~0U
-};
-
-int s390_classify_syscall(unsigned syscall)
-{
- switch(syscall) {
- case __NR_open:
- return AUDITSC_OPEN;
- case __NR_openat:
- return AUDITSC_OPENAT;
- case __NR_socketcall:
- return AUDITSC_SOCKETCALL;
- case __NR_execve:
- return AUDITSC_EXECVE;
- case __NR_openat2:
- return AUDITSC_OPENAT2;
- default:
- return AUDITSC_COMPAT;
- }
-}
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
deleted file mode 100644
index f9d418d1b619..000000000000
--- a/arch/s390/kernel/compat_linux.c
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * S390 version
- * Copyright IBM Corp. 2000
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerhard Tonn (ton@de.ibm.com)
- * Thomas Spatzier (tspat@de.ibm.com)
- *
- * Conversion between 31bit and 64bit native syscalls.
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/file.h>
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/uio.h>
-#include <linux/quota.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/filter.h>
-#include <linux/highmem.h>
-#include <linux/mman.h>
-#include <linux/ipv6.h>
-#include <linux/in.h>
-#include <linux/icmpv6.h>
-#include <linux/syscalls.h>
-#include <linux/sysctl.h>
-#include <linux/binfmts.h>
-#include <linux/capability.h>
-#include <linux/compat.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/fadvise.h>
-#include <linux/ipc.h>
-#include <linux/slab.h>
-
-#include <asm/types.h>
-#include <linux/uaccess.h>
-
-#include <net/scm.h>
-#include <net/sock.h>
-
-#include "compat_linux.h"
-
-#ifdef CONFIG_SYSVIPC
-COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
- compat_ulong_t, third, compat_uptr_t, ptr)
-{
- if (call >> 16) /* hack for backward compatibility */
- return -EINVAL;
- return compat_ksys_ipc(call, first, second, third, ptr, third);
-}
-#endif
-
-COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low)
-{
- return ksys_truncate(path, (unsigned long)high << 32 | low);
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low)
-{
- return ksys_ftruncate(fd, (unsigned long)high << 32 | low);
-}
-
-COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf,
- compat_size_t, count, u32, high, u32, low)
-{
- if ((compat_ssize_t) count < 0)
- return -EINVAL;
- return ksys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low);
-}
-
-COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf,
- compat_size_t, count, u32, high, u32, low)
-{
- if ((compat_ssize_t) count < 0)
- return -EINVAL;
- return ksys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low);
-}
-
-COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count)
-{
- return ksys_readahead(fd, (unsigned long)high << 32 | low, count);
-}
-
-struct stat64_emu31 {
- unsigned long long st_dev;
- unsigned int __pad1;
-#define STAT64_HAS_BROKEN_ST_INO 1
- u32 __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- u32 st_uid;
- u32 st_gid;
- unsigned long long st_rdev;
- unsigned int __pad3;
- long st_size;
- u32 st_blksize;
- unsigned char __pad4[4];
- u32 __pad5; /* future possible st_blocks high bits */
- u32 st_blocks; /* Number 512-byte blocks allocated. */
- u32 st_atime;
- u32 __pad6;
- u32 st_mtime;
- u32 __pad7;
- u32 st_ctime;
- u32 __pad8; /* will be high 32 bits of ctime someday */
- unsigned long st_ino;
-};
-
-static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
-{
- struct stat64_emu31 tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.st_dev = huge_encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- tmp.__st_ino = (u32)stat->ino;
- tmp.st_mode = stat->mode;
- tmp.st_nlink = (unsigned int)stat->nlink;
- tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
- tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
- tmp.st_rdev = huge_encode_dev(stat->rdev);
- tmp.st_size = stat->size;
- tmp.st_blksize = (u32)stat->blksize;
- tmp.st_blocks = (u32)stat->blocks;
- tmp.st_atime = (u32)stat->atime.tv_sec;
- tmp.st_mtime = (u32)stat->mtime.tv_sec;
- tmp.st_ctime = (u32)stat->ctime.tv_sec;
-
- return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
-{
- struct kstat stat;
- int ret = vfs_stat(filename, &stat);
- if (!ret)
- ret = cp_stat64(statbuf, &stat);
- return ret;
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
-{
- struct kstat stat;
- int ret = vfs_lstat(filename, &stat);
- if (!ret)
- ret = cp_stat64(statbuf, &stat);
- return ret;
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf)
-{
- struct kstat stat;
- int ret = vfs_fstat(fd, &stat);
- if (!ret)
- ret = cp_stat64(statbuf, &stat);
- return ret;
-}
-
-COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename,
- struct stat64_emu31 __user *, statbuf, int, flag)
-{
- struct kstat stat;
- int error;
-
- error = vfs_fstatat(dfd, filename, &stat, flag);
- if (error)
- return error;
- return cp_stat64(statbuf, &stat);
-}
-
-/*
- * Linux/i386 didn't use to be able to handle more than
- * 4 system call parameters, so these system calls used a memory
- * block for parameter passing..
- */
-
-struct mmap_arg_struct_emu31 {
- compat_ulong_t addr;
- compat_ulong_t len;
- compat_ulong_t prot;
- compat_ulong_t flags;
- compat_ulong_t fd;
- compat_ulong_t offset;
-};
-
-COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg)
-{
- struct mmap_arg_struct_emu31 a;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
- if (a.offset & ~PAGE_MASK)
- return -EINVAL;
- return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
-}
-
-COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg)
-{
- struct mmap_arg_struct_emu31 a;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
- return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count)
-{
- if ((compat_ssize_t) count < 0)
- return -EINVAL;
-
- return ksys_read(fd, buf, count);
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count)
-{
- if ((compat_ssize_t) count < 0)
- return -EINVAL;
-
- return ksys_write(fd, buf, count);
-}
-
-/*
- * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
- * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
- * because the 31 bit values differ from the 64 bit values.
- */
-
-COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise)
-{
- if (advise == 4)
- advise = POSIX_FADV_DONTNEED;
- else if (advise == 5)
- advise = POSIX_FADV_NOREUSE;
- return ksys_fadvise64_64(fd, (unsigned long)high << 32 | low, len,
- advise);
-}
-
-struct fadvise64_64_args {
- int fd;
- long long offset;
- long long len;
- int advice;
-};
-
-COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
-{
- struct fadvise64_64_args a;
-
- if ( copy_from_user(&a, args, sizeof(a)) )
- return -EFAULT;
- if (a.advice == 4)
- a.advice = POSIX_FADV_DONTNEED;
- else if (a.advice == 5)
- a.advice = POSIX_FADV_NOREUSE;
- return ksys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
-}
-
-COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow,
- u32, nhigh, u32, nlow, unsigned int, flags)
-{
- return ksys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow,
- ((u64)nhigh << 32) + nlow, flags);
-}
-
-COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow,
- u32, lenhigh, u32, lenlow)
-{
- return ksys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow,
- ((u64)lenhigh << 32) + lenlow);
-}
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
deleted file mode 100644
index ef23739b277c..000000000000
--- a/arch/s390/kernel/compat_linux.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390X_S390_H
-#define _ASM_S390X_S390_H
-
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/syscalls.h>
-#include <asm/ptrace.h>
-
-/*
- * Macro that masks the high order bit of a 32 bit pointer and
- * converts it to a 64 bit pointer.
- */
-#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
-#define AA(__x) ((unsigned long)(__x))
-
-/* Now 32bit compatibility types */
-struct ipc_kludge_32 {
- __u32 msgp; /* pointer */
- __s32 msgtyp;
-};
-
-/* asm/sigcontext.h */
-typedef union {
- __u64 d;
- __u32 f;
-} freg_t32;
-
-typedef struct {
- unsigned int fpc;
- unsigned int pad;
- freg_t32 fprs[__NUM_FPRS];
-} _s390_fp_regs32;
-
-typedef struct {
- psw_t32 psw;
- __u32 gprs[__NUM_GPRS];
- __u32 acrs[__NUM_ACRS];
-} _s390_regs_common32;
-
-typedef struct {
- _s390_regs_common32 regs;
- _s390_fp_regs32 fpregs;
-} _sigregs32;
-
-typedef struct {
- __u32 gprs_high[__NUM_GPRS];
- __u64 vxrs_low[__NUM_VXRS_LOW];
- __vector128 vxrs_high[__NUM_VXRS_HIGH];
- __u8 __reserved[128];
-} _sigregs_ext32;
-
-#define _SIGCONTEXT_NSIG32 64
-#define _SIGCONTEXT_NSIG_BPW32 32
-#define __SIGNAL_FRAMESIZE32 96
-#define _SIGMASK_COPY_SIZE32 (sizeof(u32) * 2)
-
-struct sigcontext32 {
- __u32 oldmask[_COMPAT_NSIG_WORDS];
- __u32 sregs; /* pointer */
-};
-
-/* asm/signal.h */
-
-/* asm/ucontext.h */
-struct ucontext32 {
- __u32 uc_flags;
- __u32 uc_link; /* pointer */
- compat_stack_t uc_stack;
- _sigregs32 uc_mcontext;
- compat_sigset_t uc_sigmask;
- /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
- unsigned char __unused[128 - sizeof(compat_sigset_t)];
- _sigregs_ext32 uc_mcontext_ext;
-};
-
-struct stat64_emu31;
-struct mmap_arg_struct_emu31;
-struct fadvise64_64_args;
-
-long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low);
-long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low);
-long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low);
-long compat_sys_s390_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, u32 high, u32 low);
-long compat_sys_s390_readahead(int fd, u32 high, u32 low, s32 count);
-long compat_sys_s390_stat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
-long compat_sys_s390_lstat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
-long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbuf);
-long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag);
-long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg);
-long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg);
-long compat_sys_s390_read(unsigned int fd, char __user *buf, compat_size_t count);
-long compat_sys_s390_write(unsigned int fd, const char __user *buf, compat_size_t count);
-long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise);
-long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags);
-long compat_sys_s390_fallocate(int fd, int mode, u32 offhigh, u32 offlow, u32 lenhigh, u32 lenlow);
-long compat_sys_sigreturn(void);
-long compat_sys_rt_sigreturn(void);
-
-#endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
deleted file mode 100644
index 3c400fc7e987..000000000000
--- a/arch/s390/kernel/compat_ptrace.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PTRACE32_H
-#define _PTRACE32_H
-
-#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
-#include "compat_linux.h" /* needed for psw_compat_t */
-
-struct compat_per_struct_kernel {
- __u32 cr9; /* PER control bits */
- __u32 cr10; /* PER starting address */
- __u32 cr11; /* PER ending address */
- __u32 bits; /* Obsolete software bits */
- __u32 starting_addr; /* User specified start address */
- __u32 ending_addr; /* User specified end address */
- __u16 perc_atmid; /* PER trap ATMID */
- __u32 address; /* PER trap instruction address */
- __u8 access_id; /* PER trap access identification */
-};
-
-struct compat_user_regs_struct
-{
- psw_compat_t psw;
- u32 gprs[NUM_GPRS];
- u32 acrs[NUM_ACRS];
- u32 orig_gpr2;
- /* nb: there's a 4-byte hole here */
- s390_fp_regs fp_regs;
- /*
- * These per registers are in here so that gdb can modify them
- * itself as there is no "official" ptrace interface for hardware
- * watchpoints. This is the way intel does it.
- */
- struct compat_per_struct_kernel per_info;
- u32 ieee_instruction_pointer; /* obsolete, always 0 */
-};
-
-struct compat_user {
- /* We start with the registers, to mimic the way that "memory"
- is returned from the ptrace(3,...) function. */
- struct compat_user_regs_struct regs;
- /* The rest of this junk is to help gdb figure out what goes where */
- u32 u_tsize; /* Text segment size (pages). */
- u32 u_dsize; /* Data segment size (pages). */
- u32 u_ssize; /* Stack segment size (pages). */
- u32 start_code; /* Starting virtual address of text. */
- u32 start_stack; /* Starting virtual address of stack area.
- This is actually the bottom of the stack,
- the top of the stack is always found in the
- esp register. */
- s32 signal; /* Signal that caused the core dump. */
- u32 u_ar0; /* Used by gdb to help find the values for */
- /* the registers. */
- u32 magic; /* To uniquely identify a core file */
- char u_comm[32]; /* User command that was responsible */
-};
-
-typedef struct
-{
- __u32 len;
- __u32 kernel_addr;
- __u32 process_addr;
-} compat_ptrace_area;
-
-#endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
deleted file mode 100644
index 5a86b9d1da71..000000000000
--- a/arch/s390/kernel/compat_signal.c
+++ /dev/null
@@ -1,420 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2000, 2006
- * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
- * Gerhard Tonn (ton@de.ibm.com)
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- */
-
-#include <linux/compat.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/tty.h>
-#include <linux/personality.h>
-#include <linux/binfmts.h>
-#include <asm/vdso-symbols.h>
-#include <asm/access-regs.h>
-#include <asm/ucontext.h>
-#include <linux/uaccess.h>
-#include <asm/lowcore.h>
-#include <asm/fpu.h>
-#include "compat_linux.h"
-#include "compat_ptrace.h"
-#include "entry.h"
-
-typedef struct
-{
- __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
- struct sigcontext32 sc;
- _sigregs32 sregs;
- int signo;
- _sigregs_ext32 sregs_ext;
- __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
-} sigframe32;
-
-typedef struct
-{
- __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
- __u16 svc_insn;
- compat_siginfo_t info;
- struct ucontext32 uc;
-} rt_sigframe32;
-
-/* Store registers needed to create the signal frame */
-static void store_sigregs(void)
-{
- save_access_regs(current->thread.acrs);
- save_user_fpu_regs();
-}
-
-/* Load registers after signal return */
-static void load_sigregs(void)
-{
- restore_access_regs(current->thread.acrs);
-}
-
-static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
-{
- _sigregs32 user_sregs;
- int i;
-
- user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
- user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
- user_sregs.regs.psw.mask |= PSW32_USER_BITS;
- user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
- (__u32)(regs->psw.mask & PSW_MASK_BA);
- for (i = 0; i < NUM_GPRS; i++)
- user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
- memcpy(&user_sregs.regs.acrs, current->thread.acrs,
- sizeof(user_sregs.regs.acrs));
- fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu);
- if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
- return -EFAULT;
- return 0;
-}
-
-static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
-{
- _sigregs32 user_sregs;
- int i;
-
- /* Always make any pending restarted system call return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
- return -EFAULT;
-
- if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
- return -EINVAL;
-
- /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
- regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
- (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
- (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
- (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
- /* Check for invalid user address space control. */
- if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
- regs->psw.mask = PSW_ASC_PRIMARY |
- (regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
- for (i = 0; i < NUM_GPRS; i++)
- regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
- memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
- sizeof(current->thread.acrs));
- fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu);
-
- clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
- return 0;
-}
-
-static int save_sigregs_ext32(struct pt_regs *regs,
- _sigregs_ext32 __user *sregs_ext)
-{
- __u32 gprs_high[NUM_GPRS];
- __u64 vxrs[__NUM_VXRS_LOW];
- int i;
-
- /* Save high gprs to signal stack */
- for (i = 0; i < NUM_GPRS; i++)
- gprs_high[i] = regs->gprs[i] >> 32;
- if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
- sizeof(sregs_ext->gprs_high)))
- return -EFAULT;
-
- /* Save vector registers to signal stack */
- if (cpu_has_vx()) {
- for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = current->thread.ufpu.vxrs[i].low;
- if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
- sizeof(sregs_ext->vxrs_low)) ||
- __copy_to_user(&sregs_ext->vxrs_high,
- current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
- sizeof(sregs_ext->vxrs_high)))
- return -EFAULT;
- }
- return 0;
-}
-
-static int restore_sigregs_ext32(struct pt_regs *regs,
- _sigregs_ext32 __user *sregs_ext)
-{
- __u32 gprs_high[NUM_GPRS];
- __u64 vxrs[__NUM_VXRS_LOW];
- int i;
-
- /* Restore high gprs from signal stack */
- if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
- sizeof(sregs_ext->gprs_high)))
- return -EFAULT;
- for (i = 0; i < NUM_GPRS; i++)
- *(__u32 *)&regs->gprs[i] = gprs_high[i];
-
- /* Restore vector registers from signal stack */
- if (cpu_has_vx()) {
- if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
- sizeof(sregs_ext->vxrs_low)) ||
- __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
- &sregs_ext->vxrs_high,
- sizeof(sregs_ext->vxrs_high)))
- return -EFAULT;
- for (i = 0; i < __NUM_VXRS_LOW; i++)
- current->thread.ufpu.vxrs[i].low = vxrs[i];
- }
- return 0;
-}
-
-COMPAT_SYSCALL_DEFINE0(sigreturn)
-{
- struct pt_regs *regs = task_pt_regs(current);
- sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
- sigset_t set;
-
- if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
- goto badframe;
- set_current_blocked(&set);
- save_user_fpu_regs();
- if (restore_sigregs32(regs, &frame->sregs))
- goto badframe;
- if (restore_sigregs_ext32(regs, &frame->sregs_ext))
- goto badframe;
- load_sigregs();
- return regs->gprs[2];
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
-{
- struct pt_regs *regs = task_pt_regs(current);
- rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
- sigset_t set;
-
- if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
- goto badframe;
- set_current_blocked(&set);
- if (compat_restore_altstack(&frame->uc.uc_stack))
- goto badframe;
- save_user_fpu_regs();
- if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
- goto badframe;
- if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
- goto badframe;
- load_sigregs();
- return regs->gprs[2];
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = (unsigned long) A(regs->gprs[15]);
-
- /* Overflow on alternate signal stack gives SIGSEGV. */
- if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
- return (void __user *) -1UL;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (! sas_ss_flags(sp))
- sp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- return (void __user *)((sp - frame_size) & -8ul);
-}
-
-static int setup_frame32(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- int sig = ksig->sig;
- sigframe32 __user *frame;
- unsigned long restorer;
- size_t frame_size;
-
- /*
- * gprs_high are always present for 31-bit compat tasks.
- * The space for vector registers is only allocated if
- * the machine supports it
- */
- frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
- if (!cpu_has_vx())
- frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
- sizeof(frame->sregs_ext.vxrs_high);
- frame = get_sigframe(&ksig->ka, regs, frame_size);
- if (frame == (void __user *) -1UL)
- return -EFAULT;
-
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
- return -EFAULT;
-
- /* Create struct sigcontext32 on the signal stack */
- if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
- set, sizeof(compat_sigset_t)))
- return -EFAULT;
- if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
- return -EFAULT;
-
- /* Store registers needed to create the signal frame */
- store_sigregs();
-
- /* Create _sigregs32 on the signal stack */
- if (save_sigregs32(regs, &frame->sregs))
- return -EFAULT;
-
- /* Place signal number on stack to allow backtrace from handler. */
- if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
- return -EFAULT;
-
- /* Create _sigregs_ext32 on the signal stack */
- if (save_sigregs_ext32(regs, &frame->sregs_ext))
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- restorer = (unsigned long __force)
- ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
- } else {
- restorer = VDSO32_SYMBOL(current, sigreturn);
- }
-
- /* Set up registers for signal handler */
- regs->gprs[14] = restorer;
- regs->gprs[15] = (__force __u64) frame;
- /* Force 31 bit amode and default user address space control. */
- regs->psw.mask = PSW_MASK_BA |
- (PSW_USER_BITS & PSW_MASK_ASC) |
- (regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;
-
- regs->gprs[2] = sig;
- regs->gprs[3] = (__force __u64) &frame->sc;
-
- /* We forgot to include these in the sigcontext.
- To avoid breaking binary compatibility, they are passed as args. */
- if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
- sig == SIGTRAP || sig == SIGFPE) {
- /* set extra registers only for synchronous signals */
- regs->gprs[4] = regs->int_code & 127;
- regs->gprs[5] = regs->int_parm_long;
- regs->gprs[6] = current->thread.last_break;
- }
-
- return 0;
-}
-
-static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- rt_sigframe32 __user *frame;
- unsigned long restorer;
- size_t frame_size;
- u32 uc_flags;
-
- frame_size = sizeof(*frame) -
- sizeof(frame->uc.uc_mcontext_ext.__reserved);
- /*
- * gprs_high are always present for 31-bit compat tasks.
- * The space for vector registers is only allocated if
- * the machine supports it
- */
- uc_flags = UC_GPRS_HIGH;
- if (cpu_has_vx()) {
- uc_flags |= UC_VXRS;
- } else {
- frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
- sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
- }
- frame = get_sigframe(&ksig->ka, regs, frame_size);
- if (frame == (void __user *) -1UL)
- return -EFAULT;
-
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- restorer = (unsigned long __force)
- ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
- } else {
- restorer = VDSO32_SYMBOL(current, rt_sigreturn);
- }
-
- /* Create siginfo on the signal stack */
- if (copy_siginfo_to_user32(&frame->info, &ksig->info))
- return -EFAULT;
-
- /* Store registers needed to create the signal frame */
- store_sigregs();
-
- /* Create ucontext on the signal stack. */
- if (__put_user(uc_flags, &frame->uc.uc_flags) ||
- __put_user(0, &frame->uc.uc_link) ||
- __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
- save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
- save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
- return -EFAULT;
-
- /* Set up registers for signal handler */
- regs->gprs[14] = restorer;
- regs->gprs[15] = (__force __u64) frame;
- /* Force 31 bit amode and default user address space control. */
- regs->psw.mask = PSW_MASK_BA |
- (PSW_USER_BITS & PSW_MASK_ASC) |
- (regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;
-
- regs->gprs[2] = ksig->sig;
- regs->gprs[3] = (__force __u64) &frame->info;
- regs->gprs[4] = (__force __u64) &frame->uc;
- regs->gprs[5] = current->thread.last_break;
- return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-
-void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
- struct pt_regs *regs)
-{
- int ret;
-
- /* Set up the stack frame */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO)
- ret = setup_rt_frame32(ksig, oldset, regs);
- else
- ret = setup_frame32(ksig, oldset, regs);
-
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
-}
-
diff --git a/arch/s390/kernel/cpacf.c b/arch/s390/kernel/cpacf.c
index 3bebc47beeab..9d85b4bc7036 100644
--- a/arch/s390/kernel/cpacf.c
+++ b/arch/s390/kernel/cpacf.c
@@ -3,8 +3,7 @@
* Copyright IBM Corp. 2024
*/
-#define KMSG_COMPONENT "cpacf"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpacf: " fmt
#include <linux/cpu.h>
#include <linux/device.h>
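The KMSG_COMPONENT removals in this and the following files are purely cosmetic: pr_fmt() is still expanded in front of every pr_*() format string, so the logged prefix does not change. A small, self-contained illustration of the convention (not taken from the patch):

    /* pr_fmt() must be defined before the first printk.h inclusion. */
    #define pr_fmt(fmt) "cpacf: " fmt

    #include <linux/printk.h>

    static void pr_fmt_example(void)
    {
            pr_info("query function failed\n");  /* logs: "cpacf: query function failed" */
    }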
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 2f4174b961de..ab611764642a 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -6,8 +6,7 @@
* Christian Borntraeger (cborntra@de.ibm.com),
*/
-#define KMSG_COMPONENT "cpcmd"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpcmd: " fmt
#include <linux/kernel.h>
#include <linux/export.h>
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 6a26f202441d..71cdb6845dd7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -10,8 +10,7 @@
* Bugreports to: <Linux390@de.ibm.com>
*/
-#define KMSG_COMPONENT "s390dbf"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "s390dbf: " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 63a1d4226ff8..1cec93895b3a 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -503,24 +503,27 @@ static int copy_from_regs(struct pt_regs *regs, void *dst, void *src, int len)
void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
+ unsigned long addr, pswaddr;
unsigned char code[64];
char buffer[128], *ptr;
- unsigned long addr;
int start, end, opsize, hops, i;
+ pswaddr = regs->psw.addr;
+ if (test_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED))
+ pswaddr = __forward_psw(regs->psw, regs->int_code >> 16);
/* Get a snapshot of the 64 bytes surrounding the fault address. */
- for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
- addr = regs->psw.addr - 34 + start;
+ for (start = 32; start && pswaddr >= 34 - start; start -= 2) {
+ addr = pswaddr - 34 + start;
if (copy_from_regs(regs, code + start - 2, (void *)addr, 2))
break;
}
for (end = 32; end < 64; end += 2) {
- addr = regs->psw.addr + end - 32;
+ addr = pswaddr + end - 32;
if (copy_from_regs(regs, code + end, (void *)addr, 2))
break;
}
/* Code snapshot usable ? */
- if ((regs->psw.addr & 1) || start >= end) {
+ if ((pswaddr & 1) || start >= end) {
printk("%s Code: Bad PSW.\n", mode);
return;
}
@@ -543,12 +546,12 @@ void show_code(struct pt_regs *regs)
while (start < end && hops < 8) {
opsize = insn_length(code[start]);
if (start + opsize == 32)
- *ptr++ = '#';
+ *ptr++ = '*';
else if (start == 32)
*ptr++ = '>';
else
*ptr++ = ' ';
- addr = regs->psw.addr + start - 32;
+ addr = pswaddr + start - 32;
ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index dd410962ecbe..f9d52e05e01e 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -155,12 +155,16 @@ static void show_last_breaking_event(struct pt_regs *regs)
void show_registers(struct pt_regs *regs)
{
struct psw_bits *psw = &psw_bits(regs->psw);
+ unsigned long pswaddr;
char *mode;
+ pswaddr = regs->psw.addr;
+ if (test_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED))
+ pswaddr = __forward_psw(regs->psw, regs->int_code >> 16);
mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
+ printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)pswaddr);
if (!user_mode(regs))
- pr_cont(" (%pSR)", (void *)regs->psw.addr);
+ pr_cont(" (%pSR)", (void *)pswaddr);
pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 544e5403dd91..b27239c03d79 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -4,8 +4,7 @@
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
*/
-#define KMSG_COMPONENT "setup"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "setup: " fmt
#include <linux/sched/debug.h>
#include <linux/cpufeature.h>
@@ -120,21 +119,21 @@ static noinline __init void setup_arch_string(void)
EBCASC(mach->type, sizeof(mach->type));
EBCASC(mach->model, sizeof(mach->model));
EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
- sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
- mach->manufacturer, mach->type,
- mach->model, mach->model_capacity);
+ scnprintf(mstr, sizeof(mstr), "%-16.16s %-4.4s %-16.16s %-16.16s",
+ mach->manufacturer, mach->type,
+ mach->model, mach->model_capacity);
strim_all(mstr);
if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
- sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
+ scnprintf(hvstr, sizeof(hvstr), "%-16.16s", vm->vm[0].cpi);
strim_all(hvstr);
} else {
- sprintf(hvstr, "%s",
- machine_is_lpar() ? "LPAR" :
- machine_is_vm() ? "z/VM" :
- machine_is_kvm() ? "KVM" : "unknown");
+ scnprintf(hvstr, sizeof(hvstr), "%s",
+ machine_is_lpar() ? "LPAR" :
+ machine_is_vm() ? "z/VM" :
+ machine_is_kvm() ? "KVM" : "unknown");
}
- sprintf(arch_hw_string, "HW: %s (%s)", mstr, hvstr);
+ scnprintf(arch_hw_string, sizeof(arch_hw_string), "HW: %s (%s)", mstr, hvstr);
dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}
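Unlike sprintf(), scnprintf() takes the destination size and returns the number of bytes actually stored (excluding the terminating NUL), so the fixed-size mstr/hvstr buffers above can no longer be overrun. A hedged usage sketch with an illustrative buffer of its own:

    #include <linux/kernel.h>       /* scnprintf(); declared via linux/sprintf.h in newer kernels */
    #include <linux/printk.h>

    static void scnprintf_example(const char *manufacturer, const char *type)
    {
            char mstr[64];          /* illustrative size, not from the patch */
            int len;

            /* Truncates instead of writing past the end of mstr. */
            len = scnprintf(mstr, sizeof(mstr), "%-16.16s %-4.4s", manufacturer, type);
            pr_debug("machine string uses %d of %zu bytes\n", len, sizeof(mstr));
    }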
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 75b0fbb236d0..c360087807d8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -162,9 +162,13 @@ SYM_FUNC_START(__switch_to_asm)
stg %r3,__LC_CURRENT(%r13) # store task struct of next
stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
- aghi %r3,__TASK_pid
- mvc __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next
+ lay %r4,__TASK_pid(%r3)
+ mvc __LC_CURRENT_PID(4,%r13),0(%r4) # store pid of next
ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
+#ifdef CONFIG_STACKPROTECTOR
+ lg %r3,__TASK_stack_canary(%r3)
+ stg %r3,__LC_STACK_CANARY(%r13)
+#endif
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
BR_EX %r14
SYM_FUNC_END(__switch_to_asm)
@@ -606,20 +610,3 @@ SYM_DATA_START_LOCAL(daton_psw)
.quad PSW_KERNEL_BITS
.quad .Ldaton
SYM_DATA_END(daton_psw)
-
- .section .rodata, "a"
- .balign 8
-#define SYSCALL(esame,emu) .quad __s390x_ ## esame
-SYM_DATA_START(sys_call_table)
-#include <asm/syscall_table.h>
-SYM_DATA_END(sys_call_table)
-#undef SYSCALL
-
-#ifdef CONFIG_COMPAT
-
-#define SYSCALL(esame,emu) .quad __s390_ ## emu
-SYM_DATA_START(sys_call_table_emu)
-#include <asm/syscall_table.h>
-SYM_DATA_END(sys_call_table_emu)
-#undef SYSCALL
-#endif
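In C terms, the new __switch_to_asm lines copy the incoming task's canary into the lowcore slot exported as __LC_STACK_CANARY, which is what the compiler-generated stack-protector checks read after the switch. Roughly the following, as a sketch against the lowcore field added by this series rather than code from the patch:

    #include <linux/sched.h>        /* task_struct */
    #include <asm/lowcore.h>        /* get_lowcore() */

    #ifdef CONFIG_STACKPROTECTOR
    static inline void switch_stack_canary(struct task_struct *next)
    {
            /* Keep the per-CPU guard value in sync with the next task. */
            get_lowcore()->stack_canary = next->stack_canary;
    }
    #endif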
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head.S
index 7edb9ded199c..7edb9ded199c 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head.S
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
index 2507bc3f7757..217206522266 100644
--- a/arch/s390/kernel/hiperdispatch.c
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -3,8 +3,7 @@
* Copyright IBM Corp. 2024
*/
-#define KMSG_COMPONENT "hd"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "hd: " fmt
/*
* Hiperdispatch:
@@ -65,7 +64,7 @@
#define HD_DELAY_FACTOR (4)
#define HD_DELAY_INTERVAL (HZ / 4)
-#define HD_STEAL_THRESHOLD 30
+#define HD_STEAL_THRESHOLD 10
#define HD_STEAL_AVG_WEIGHT 16
static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 91e207b50394..9d1f8a50f5a4 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -22,12 +22,14 @@
#include <linux/bug.h>
#include <linux/memory.h>
#include <linux/execmem.h>
+#include <asm/arch-stackprotector.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
#include <asm/ftrace.lds.h>
#include <asm/set_memory.h>
#include <asm/setup.h>
+#include <asm/asm-offsets.h>
#if 0
#define DEBUGP printk
@@ -495,9 +497,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *s;
char *secstrings, *secname;
void *aseg;
-#ifdef CONFIG_FUNCTION_TRACER
- int ret;
-#endif
+ int rc = 0;
if (IS_ENABLED(CONFIG_EXPOLINE) &&
!nospec_disable && me->arch.plt_size) {
@@ -527,14 +527,21 @@ int module_finalize(const Elf_Ehdr *hdr,
(str_has_prefix(secname, ".s390_return")))
nospec_revert(aseg, aseg + s->sh_size);
+ if (IS_ENABLED(CONFIG_STACKPROTECTOR) &&
+ (str_has_prefix(secname, "__stack_protector_loc"))) {
+ rc = stack_protector_apply(aseg, aseg + s->sh_size);
+ if (rc)
+ break;
+ }
+
#ifdef CONFIG_FUNCTION_TRACER
if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
- ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
- if (ret < 0)
- return ret;
+ rc = module_alloc_ftrace_hotpatch_trampolines(me, s);
+ if (rc)
+ break;
}
#endif /* CONFIG_FUNCTION_TRACER */
}
- return 0;
+ return rc;
}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 11f33243a23f..a55abbf65333 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -184,7 +184,7 @@ static notrace void nmi_print_info(void)
sclp_emergency_printk(message);
}
-static notrace void s390_handle_damage(void)
+static notrace void __noreturn s390_handle_damage(void)
{
struct lowcore *lc = get_lowcore();
union ctlreg0 cr0, cr0_new;
@@ -214,7 +214,6 @@ static notrace void s390_handle_damage(void)
lc->mcck_new_psw = psw_save;
local_ctl_load(0, &cr0.reg);
disabled_wait();
- while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index c2a468986212..94fa44776d0c 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -6,8 +6,7 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "os_info"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "os_info: " fmt
#include <linux/crash_dump.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 04457d88e589..408ab93112bf 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -6,8 +6,7 @@
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
* Thomas Richter <tmricht@linux.ibm.com>
*/
-#define KMSG_COMPONENT "cpum_cf"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpum_cf: " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
@@ -1206,7 +1205,7 @@ static int __init cpumf_pmu_init(void)
}
/* Setup s390dbf facility */
- cf_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
+ cf_dbg = debug_register("cpum_cf", 2, 1, 128);
if (!cf_dbg) {
pr_err("Registration of s390dbf(cpum_cf) failed\n");
rc = -ENOMEM;
@@ -1689,7 +1688,6 @@ static const struct file_operations cfset_fops = {
.open = cfset_open,
.release = cfset_release,
.unlocked_ioctl = cfset_ioctl,
- .compat_ioctl = cfset_ioctl,
};
static struct miscdevice cfset_dev = {
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index f432869f8921..459af23a47a5 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -5,8 +5,7 @@
* Copyright IBM Corp. 2013, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "cpum_sf"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpum_sf: " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
@@ -1093,7 +1092,7 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
* combined-sampling data entry consists of a basic- and a diagnostic-sampling
* data entry. The sampling function is determined by the flags in the perf
* event hardware structure. The function always works with a combined-sampling
- * data entry but ignores the the diagnostic portion if it is not available.
+ * data entry but ignores the diagnostic portion if it is not available.
*
* Note that the implementation focuses on basic-sampling data entries and, if
* such an entry is not valid, the entire combined-sampling data entry is
@@ -2070,7 +2069,7 @@ static int __init init_cpum_sampling_pmu(void)
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
- sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
+ sfdbg = debug_register("cpum_sf", 2, 1, 80);
if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
return -ENOMEM;
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 91b8716c883a..606750bae508 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -5,8 +5,7 @@
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "perf"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "perf: " fmt
#include <linux/kernel.h>
#include <linux/perf_event.h>
@@ -15,7 +14,6 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <linux/compat.h>
#include <linux/sysfs.h>
#include <asm/stacktrace.h>
#include <asm/irq.h>
diff --git a/arch/s390/kernel/perf_pai.c b/arch/s390/kernel/perf_pai.c
new file mode 100644
index 000000000000..810f5b6c5e01
--- /dev/null
+++ b/arch/s390/kernel/perf_pai.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance event support - Processor Activity Instrumentation Facility
+ *
+ * Copyright IBM Corp. 2026
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ */
+#define pr_fmt(fmt) "pai: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/perf_event.h>
+#include <asm/ctlreg.h>
+#include <asm/pai.h>
+#include <asm/debug.h>
+
+static debug_info_t *paidbg;
+
+DEFINE_STATIC_KEY_FALSE(pai_key);
+
+enum {
+ PAI_PMU_CRYPTO, /* Index of PMU pai_crypto */
+ PAI_PMU_EXT, /* Index of PMU pai_ext */
+ PAI_PMU_MAX /* # of PAI PMUs */
+};
+
+enum {
+ PAIE1_CB_SZ = 0x200, /* Size of PAIE1 control block */
+ PAIE1_CTRBLOCK_SZ = 0x400 /* Size of PAIE1 counter blocks */
+};
+
+struct pai_userdata {
+ u16 num;
+ u64 value;
+} __packed;
+
+/* Create the PAI extension 1 control block area.
+ * The PAI extension control block 1 is pointed to by lowcore
+ * address 0x1508 for each CPU. This control block is 512 bytes in size
+ * and requires a 512 byte boundary alignment.
+ */
+struct paiext_cb { /* PAI extension 1 control block */
+ u64 header; /* Not used */
+ u64 reserved1;
+ u64 acc; /* Addr to analytics counter control block */
+ u8 reserved2[PAIE1_CTRBLOCK_SZ - 3 * sizeof(u64)];
+} __packed;
+
+struct pai_map {
+ unsigned long *area; /* Area for CPU to store counters */
+ struct pai_userdata *save; /* Page to store non-zero counters */
+ unsigned int active_events; /* # of PAI crypto users */
+ refcount_t refcnt; /* Reference count mapped buffers */
+ struct perf_event *event; /* Perf event for sampling */
+ struct list_head syswide_list; /* List system-wide sampling events */
+ struct paiext_cb *paiext_cb; /* PAI extension control block area */
+ bool fullpage; /* True: counter area is a full page */
+};
+
+struct pai_mapptr {
+ struct pai_map *mapptr;
+};
+
+static struct pai_root { /* Anchor to per CPU data */
+ refcount_t refcnt; /* Overall active events */
+ struct pai_mapptr __percpu *mapptr;
+} pai_root[PAI_PMU_MAX];
+
+/* This table defines the different parameters of the PAI PMUs. During
+ * initialization the machine dependent values are extracted and saved.
+ * However most of the values are static and do not change.
+ * There is one table entry per PAI PMU.
+ */
+struct pai_pmu { /* Define PAI PMU characteristics */
+ const char *pmuname; /* Name of PMU */
+ const int facility_nr; /* Facility number to check for support */
+ unsigned int num_avail; /* # Counters defined by hardware */
+ unsigned int num_named; /* # Counters known by name */
+ unsigned long base; /* Counter set base number */
+ unsigned long kernel_offset; /* Offset to kernel part in counter page */
+ unsigned long area_size; /* Size of counter area */
+ const char * const *names; /* List of counter names */
+ struct pmu *pmu; /* Ptr to supporting PMU */
+ int (*init)(struct pai_pmu *p); /* PMU support init function */
+ void (*exit)(struct pai_pmu *p); /* PMU support exit function */
+ struct attribute_group *event_group; /* Ptr to attribute of events */
+};
+
+static struct pai_pmu pai_pmu[]; /* Forward declaration */
+
+/* Free per CPU data when the last event is removed. */
+static void pai_root_free(int idx)
+{
+ if (refcount_dec_and_test(&pai_root[idx].refcnt)) {
+ free_percpu(pai_root[idx].mapptr);
+ pai_root[idx].mapptr = NULL;
+ }
+ debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__,
+ idx, refcount_read(&pai_root[idx].refcnt));
+}
+
+/*
+ * On initialization of first event also allocate per CPU data dynamically.
+ * Start with an array of pointers, the array size is the maximum number of
+ * CPUs possible, which might be larger than the number of CPUs currently
+ * online.
+ */
+static int pai_root_alloc(int idx)
+{
+ if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) {
+ /* The memory is already zeroed. */
+ pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
+ if (!pai_root[idx].mapptr)
+ return -ENOMEM;
+ refcount_set(&pai_root[idx].refcnt, 1);
+ }
+ return 0;
+}
+
+/* Release the PMU if event is the last perf event */
+static DEFINE_MUTEX(pai_reserve_mutex);
+
+/* Free all memory allocated for event counting/sampling setup */
+static void pai_free(struct pai_mapptr *mp)
+{
+ if (mp->mapptr->fullpage)
+ free_page((unsigned long)mp->mapptr->area);
+ else
+ kfree(mp->mapptr->area);
+ kfree(mp->mapptr->paiext_cb);
+ kvfree(mp->mapptr->save);
+ kfree(mp->mapptr);
+ mp->mapptr = NULL;
+}
+
+/* Adjust usage counters and remove allocated memory when all users are
+ * gone.
+ */
+static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
+ struct pai_map *cpump = mp->mapptr;
+
+ mutex_lock(&pai_reserve_mutex);
+ debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d "
+ "refcnt %u\n", __func__, event->attr.config, idx,
+ event->cpu, cpump->active_events,
+ refcount_read(&cpump->refcnt));
+ if (refcount_dec_and_test(&cpump->refcnt))
+ pai_free(mp);
+ pai_root_free(idx);
+ mutex_unlock(&pai_reserve_mutex);
+}
+
+static void pai_event_destroy(struct perf_event *event)
+{
+ int cpu;
+
+ free_page(PAI_SAVE_AREA(event));
+ if (event->cpu == -1) {
+ struct cpumask *mask = PAI_CPU_MASK(event);
+
+ for_each_cpu(cpu, mask)
+ pai_event_destroy_cpu(event, cpu);
+ kfree(mask);
+ } else {
+ pai_event_destroy_cpu(event, event->cpu);
+ }
+}
+
+static void paicrypt_event_destroy(struct perf_event *event)
+{
+ static_branch_dec(&pai_key);
+ pai_event_destroy(event);
+}
+
+static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
+{
+ if (offset)
+ nr += offset / sizeof(*page);
+ return page[nr];
+}
+
+/* Read the counter values. Return value from location in CMP. For base
+ * event xxx_ALL sum up all events. Returns counter value.
+ */
+static u64 pai_getdata(struct perf_event *event, bool kernel)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_pmu *pp = &pai_pmu[idx];
+ struct pai_map *cpump = mp->mapptr;
+ unsigned int i;
+ u64 sum = 0;
+
+ if (event->attr.config != pp->base) {
+ return pai_getctr(cpump->area,
+ event->attr.config - pp->base,
+ kernel ? pp->kernel_offset : 0);
+ }
+
+ for (i = 1; i <= pp->num_avail; i++) {
+ u64 val = pai_getctr(cpump->area, i,
+ kernel ? pp->kernel_offset : 0);
+
+ if (!val)
+ continue;
+ sum += val;
+ }
+ return sum;
+}
+
+static u64 paicrypt_getall(struct perf_event *event)
+{
+ u64 sum = 0;
+
+ if (!event->attr.exclude_kernel)
+ sum += pai_getdata(event, true);
+ if (!event->attr.exclude_user)
+ sum += pai_getdata(event, false);
+
+ return sum;
+}
+
+/* Check concurrent access of counting and sampling for crypto events.
+ * This function is called in process context and it is safe to block.
+ * When the event initialization function fails, no other callback will
+ * be invoked.
+ *
+ * Allocate the memory for the event.
+ */
+static int pai_alloc_cpu(struct perf_event *event, int cpu)
+{
+ int rc, idx = PAI_PMU_IDX(event);
+ struct pai_map *cpump = NULL;
+ bool need_paiext_cb = false;
+ struct pai_mapptr *mp;
+
+ mutex_lock(&pai_reserve_mutex);
+ /* Allocate root node */
+ rc = pai_root_alloc(idx);
+ if (rc)
+ goto unlock;
+
+ /* Allocate node for this event */
+ mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
+ cpump = mp->mapptr;
+	if (!cpump) {	/* pai_map allocated? */
+ rc = -ENOMEM;
+ cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
+ if (!cpump)
+ goto undo;
+		/* Allocate memory for counter area and counter extraction.
+		 * Only the first counting event on this CPU has to allocate it.
+		 */
+ mp->mapptr = cpump;
+ if (idx == PAI_PMU_CRYPTO) {
+ cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ /* free_page() can handle 0x0 address */
+ cpump->fullpage = true;
+ } else { /* PAI_PMU_EXT */
+			/*
+			 * Allocate memory for counter area and counter extraction.
+			 * These are
+			 * - a 512 byte block which requires 512 byte boundary
+			 *   alignment.
+			 * - a 1 KB block which requires 1 KB boundary
+			 *   alignment.
+			 * Only the first counting event has to allocate the area.
+			 *
+			 * Note: This works with commit 59bb47985c1d by default.
+			 * Backporting this to kernels without this commit might
+			 * need adjustment.
+			 */
+ cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL);
+ cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
+ need_paiext_cb = true;
+ }
+ cpump->save = kvmalloc_array(pai_pmu[idx].num_avail + 1,
+ sizeof(struct pai_userdata),
+ GFP_KERNEL);
+ if (!cpump->area || !cpump->save ||
+ (need_paiext_cb && !cpump->paiext_cb)) {
+ pai_free(mp);
+ goto undo;
+ }
+ INIT_LIST_HEAD(&cpump->syswide_list);
+ refcount_set(&cpump->refcnt, 1);
+ rc = 0;
+ } else {
+ refcount_inc(&cpump->refcnt);
+ }
+
+undo:
+ if (rc) {
+		/* Error in allocation of event, decrement anchor. Since
+		 * the event is not created, its destroy() function is never
+		 * invoked. Adjust the reference counter for the anchor.
+		 */
+ pai_root_free(idx);
+ }
+unlock:
+ mutex_unlock(&pai_reserve_mutex);
+ /* If rc is non-zero, no increment of counter/sampler was done. */
+ return rc;
+}
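+
+/* Summary of the reference counting scheme above (descriptive only): the
+ * first event on any CPU takes a reference on pai_root[idx] and allocates
+ * the per CPU pai_map; every further event on the same CPU only increments
+ * cpump->refcnt. pai_event_destroy_cpu() drops the references in reverse
+ * order, freeing the pai_map and finally the per CPU root anchor when the
+ * last user is gone.
+ */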
+
+static int pai_alloc(struct perf_event *event)
+{
+ struct cpumask *maskptr;
+ int cpu, rc = -ENOMEM;
+
+ maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
+ if (!maskptr)
+ goto out;
+
+ for_each_online_cpu(cpu) {
+ rc = pai_alloc_cpu(event, cpu);
+ if (rc) {
+ for_each_cpu(cpu, maskptr)
+ pai_event_destroy_cpu(event, cpu);
+ kfree(maskptr);
+ goto out;
+ }
+ cpumask_set_cpu(cpu, maskptr);
+ }
+
+	/*
+	 * On error all cpumasks are freed and all events have been destroyed.
+	 * Save which CPUs the data structures have been allocated for and
+	 * release them in the pai_event_destroy() callback of this event.
+	 */
+ PAI_CPU_MASK(event) = maskptr;
+ rc = 0;
+out:
+ return rc;
+}
+
+/* Validate event number and return error if event is not supported.
+ * On successful return, PAI_PMU_IDX(event) is set to the index of
+ * the supporting pai_pmu[] array element.
+ */
+static int pai_event_valid(struct perf_event *event, int idx)
+{
+ struct perf_event_attr *a = &event->attr;
+ struct pai_pmu *pp = &pai_pmu[idx];
+
+	/* Accept PERF_TYPE_RAW events as well as the PMU's own dynamic type */
+ if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
+ return -ENOENT;
+ /* Allow only CRYPTO_ALL/NNPA_ALL for sampling */
+ if (a->sample_period && a->config != pp->base)
+ return -EINVAL;
+	/* PAI event must be in valid range, try other PMUs if not */
+ if (a->config < pp->base || a->config > pp->base + pp->num_avail)
+ return -ENOENT;
+ if (idx == PAI_PMU_EXT && a->exclude_user)
+ return -EINVAL;
+ PAI_PMU_IDX(event) = idx;
+ return 0;
+}
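+
+/* Example (illustrative, assuming the 0x1000 counter id base described in
+ * the attribute comment further below): a counting event may specify any
+ * config in the range base .. base + num_avail, e.g. 0x1000 for CRYPTO_ALL
+ * or 0x1007 for KM_AES_128, while a sampling event (sample_period != 0) is
+ * accepted only with the base event CRYPTO_ALL respectively NNPA_ALL.
+ */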
+
+/* Might be called on a different CPU than the one the event is intended for. */
+static int pai_event_init(struct perf_event *event, int idx)
+{
+ struct perf_event_attr *a = &event->attr;
+ int rc;
+
+ /* PAI event must be valid and in supported range */
+ rc = pai_event_valid(event, idx);
+ if (rc)
+ goto out;
+ /* Get a page to store last counter values for sampling */
+ if (a->sample_period) {
+ PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
+ if (!PAI_SAVE_AREA(event)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (event->cpu >= 0)
+ rc = pai_alloc_cpu(event, event->cpu);
+ else
+ rc = pai_alloc(event);
+ if (rc) {
+ free_page(PAI_SAVE_AREA(event));
+ goto out;
+ }
+
+ if (a->sample_period) {
+ a->sample_period = 1;
+ a->freq = 0;
+		/* Register for the PMU's sched_task() callback to be called */
+ event->attach_state |= PERF_ATTACH_SCHED_CB;
+		/* Add raw data which contains the memory mapped counters */
+ a->sample_type |= PERF_SAMPLE_RAW;
+ /* Turn off inheritance */
+ a->inherit = 0;
+ }
+out:
+ return rc;
+}
+
+static int paicrypt_event_init(struct perf_event *event)
+{
+ int rc = pai_event_init(event, PAI_PMU_CRYPTO);
+
+ if (!rc) {
+ event->destroy = paicrypt_event_destroy;
+ static_branch_inc(&pai_key);
+ }
+ return rc;
+}
+
+static void pai_read(struct perf_event *event,
+ u64 (*fct)(struct perf_event *event))
+{
+ u64 prev, new, delta;
+
+ prev = local64_read(&event->hw.prev_count);
+ new = fct(event);
+ local64_set(&event->hw.prev_count, new);
+ delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
+ local64_add(delta, &event->count);
+}
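+
+/* Worked example of the wraparound handling above: if the previous reading
+ * was prev = 0xfffffffffffffffe and the new reading is new = 1, then
+ * delta = (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3, i.e. the three
+ * increments that occurred across the 64-bit counter wrap.
+ */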
+
+static void paicrypt_read(struct perf_event *event)
+{
+ pai_read(event, paicrypt_getall);
+}
+
+static void pai_start(struct perf_event *event, int flags,
+ u64 (*fct)(struct perf_event *event))
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_pmu *pp = &pai_pmu[idx];
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_map *cpump = mp->mapptr;
+ u64 sum;
+
+ if (!event->attr.sample_period) { /* Counting */
+ sum = fct(event); /* Get current value */
+ local64_set(&event->hw.prev_count, sum);
+ } else { /* Sampling */
+ memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
+ /* Enable context switch callback for system-wide sampling */
+ if (!(event->attach_state & PERF_ATTACH_TASK)) {
+ list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
+ perf_sched_cb_inc(event->pmu);
+ } else {
+ cpump->event = event;
+ }
+ }
+}
+
+static void paicrypt_start(struct perf_event *event, int flags)
+{
+ pai_start(event, flags, paicrypt_getall);
+}
+
+static int pai_add(struct perf_event *event, int flags)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_map *cpump = mp->mapptr;
+ struct paiext_cb *pcb = cpump->paiext_cb;
+ unsigned long ccd;
+
+ if (++cpump->active_events == 1) {
+ if (!pcb) { /* PAI crypto */
+ ccd = virt_to_phys(cpump->area) | PAI_CRYPTO_KERNEL_OFFSET;
+ WRITE_ONCE(get_lowcore()->ccd, ccd);
+ local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
+ } else { /* PAI extension 1 */
+ ccd = virt_to_phys(pcb);
+ WRITE_ONCE(get_lowcore()->aicd, ccd);
+ pcb->acc = virt_to_phys(cpump->area) | 0x1;
+ /* Enable CPU instruction lookup for PAIE1 control block */
+ local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
+ }
+ }
+ if (flags & PERF_EF_START)
+ pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD);
+ event->hw.state = 0;
+ return 0;
+}
+
+static int paicrypt_add(struct perf_event *event, int flags)
+{
+ return pai_add(event, flags);
+}
+
+static void pai_have_sample(struct perf_event *, struct pai_map *);
+static void pai_stop(struct perf_event *event, int flags)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_map *cpump = mp->mapptr;
+
+ if (!event->attr.sample_period) { /* Counting */
+ pai_pmu[idx].pmu->read(event);
+ } else { /* Sampling */
+ if (!(event->attach_state & PERF_ATTACH_TASK)) {
+ perf_sched_cb_dec(event->pmu);
+ list_del(PAI_SWLIST(event));
+ } else {
+ pai_have_sample(event, cpump);
+ cpump->event = NULL;
+ }
+ }
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static void paicrypt_stop(struct perf_event *event, int flags)
+{
+ pai_stop(event, flags);
+}
+
+static void pai_del(struct perf_event *event, int flags)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_map *cpump = mp->mapptr;
+ struct paiext_cb *pcb = cpump->paiext_cb;
+
+ pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE);
+ if (--cpump->active_events == 0) {
+ if (!pcb) { /* PAI crypto */
+ local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
+ WRITE_ONCE(get_lowcore()->ccd, 0);
+ } else { /* PAI extension 1 */
+ /* Disable CPU instruction lookup for PAIE1 control block */
+ local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
+ pcb->acc = 0;
+ WRITE_ONCE(get_lowcore()->aicd, 0);
+ }
+ }
+}
+
+static void paicrypt_del(struct perf_event *event, int flags)
+{
+ pai_del(event, flags);
+}
+
+/* Create raw data and save it in the buffer. Calculate the delta for each
+ * counter between this invocation and the last invocation.
+ * Returns the number of bytes copied.
+ * Saves only entries with a positive counter difference, each of the form
+ * 2 bytes: number of the counter
+ * 8 bytes: value of the counter
+ */
+static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page,
+ struct pai_pmu *pp, unsigned long *page_old,
+ bool exclude_user, bool exclude_kernel)
+{
+ int i, outidx = 0;
+
+ for (i = 1; i <= pp->num_avail; i++) {
+ u64 val = 0, val_old = 0;
+
+ if (!exclude_kernel) {
+ val += pai_getctr(page, i, pp->kernel_offset);
+ val_old += pai_getctr(page_old, i, pp->kernel_offset);
+ }
+ if (!exclude_user) {
+ val += pai_getctr(page, i, 0);
+ val_old += pai_getctr(page_old, i, 0);
+ }
+ if (val >= val_old)
+ val -= val_old;
+ else
+ val = (~0ULL - val_old) + val + 1;
+ if (val) {
+ userdata[outidx].num = i;
+ userdata[outidx].value = val;
+ outidx++;
+ }
+ }
+ return outidx * sizeof(*userdata);
+}
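+
+/* Illustration of the resulting raw data (descriptive only, layout assumed
+ * from the packed struct pai_userdata of 10 bytes): if, say, counter 7
+ * advanced by 2 and counter 23 by 5 since the last snapshot, the buffer
+ * would contain the two records { .num = 7, .value = 2 } and
+ * { .num = 23, .value = 5 } and pai_copy() would return 20.
+ */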
+
+/* Write a sample when one or more counter values are nonzero.
+ *
+ * Note: The sched_task() callbacks and pai_push_sample() are not invoked
+ * after pai_del() has been called, because perf_sched_cb_dec() removes
+ * the context switch callback. Both are only called while sampling is
+ * active; perf_sched_cb_inc() installed the sched_task() callback to run
+ * at context switch time.
+ *
+ * This causes perf_event_context_sched_out() and
+ * perf_event_context_sched_in() to check whether the PMU has installed a
+ * sched_task() callback. That callback is not active after pai_del()
+ * returns and has deleted the event on that CPU.
+ */
+static int pai_push_sample(size_t rawsize, struct pai_map *cpump,
+ struct perf_event *event)
+{
+ int idx = PAI_PMU_IDX(event);
+ struct pai_pmu *pp = &pai_pmu[idx];
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+ struct pt_regs regs;
+ int overflow;
+
+ /* Setup perf sample */
+ memset(&regs, 0, sizeof(regs));
+ memset(&raw, 0, sizeof(raw));
+ memset(&data, 0, sizeof(data));
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ if (event->attr.sample_type & PERF_SAMPLE_TID) {
+ data.tid_entry.pid = task_tgid_nr(current);
+ data.tid_entry.tid = task_pid_nr(current);
+ }
+ if (event->attr.sample_type & PERF_SAMPLE_TIME)
+ data.time = event->clock();
+ if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
+ data.id = event->id;
+ if (event->attr.sample_type & PERF_SAMPLE_CPU) {
+ data.cpu_entry.cpu = smp_processor_id();
+ data.cpu_entry.reserved = 0;
+ }
+ if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ raw.frag.size = rawsize;
+ raw.frag.data = cpump->save;
+ perf_sample_save_raw_data(&data, event, &raw);
+ }
+
+ overflow = perf_event_overflow(event, &data, &regs);
+ perf_event_update_userpage(event);
+	/* Save counter area after reading event data. */
+ memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
+ return overflow;
+}
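+
+/* The pushed sample carries the counter deltas as PERF_SAMPLE_RAW data;
+ * a consumer is expected to decode the payload as a sequence of
+ * struct pai_userdata records as produced by pai_copy() above.
+ */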
+
+/* Check if there is data to be saved on schedule out of a task. */
+static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
+{
+ struct pai_pmu *pp;
+ size_t rawsize;
+
+ if (!event) /* No event active */
+ return;
+ pp = &pai_pmu[PAI_PMU_IDX(event)];
+ rawsize = pai_copy(cpump->save, cpump->area, pp,
+ (unsigned long *)PAI_SAVE_AREA(event),
+ event->attr.exclude_user,
+ event->attr.exclude_kernel);
+	if (rawsize)			/* At least one counter incremented */
+ pai_push_sample(rawsize, cpump, event);
+}
+
+/* Check all system-wide sampling events for data to be saved on schedule
+ * out of a task.
+ */
+static void pai_have_samples(int idx)
+{
+ struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
+ struct pai_map *cpump = mp->mapptr;
+ struct perf_event *event;
+
+ list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
+ pai_have_sample(event, cpump);
+}
+
+/* Called on schedule-in and schedule-out. No access to event structure,
+ * but for sampling only event CRYPTO_ALL is allowed.
+ */
+static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
+{
+	/* We started with a clean page at event installation. So read out
+	 * the results on schedule-out and, if the page was dirty, save the
+	 * old values.
+	 */
+ if (!sched_in)
+ pai_have_samples(PAI_PMU_CRYPTO);
+}
+
+/* ============================= paiext ====================================*/
+
+static void paiext_event_destroy(struct perf_event *event)
+{
+ pai_event_destroy(event);
+}
+
+/* Might be called on a different CPU than the one the event is intended for. */
+static int paiext_event_init(struct perf_event *event)
+{
+ int rc = pai_event_init(event, PAI_PMU_EXT);
+
+ if (!rc) {
+ event->attr.exclude_kernel = true; /* No kernel space part */
+ event->destroy = paiext_event_destroy;
+ /* Offset of NNPA in paiext_cb */
+ event->hw.config_base = offsetof(struct paiext_cb, acc);
+ }
+ return rc;
+}
+
+static u64 paiext_getall(struct perf_event *event)
+{
+ return pai_getdata(event, false);
+}
+
+static void paiext_read(struct perf_event *event)
+{
+ pai_read(event, paiext_getall);
+}
+
+static void paiext_start(struct perf_event *event, int flags)
+{
+ pai_start(event, flags, paiext_getall);
+}
+
+static int paiext_add(struct perf_event *event, int flags)
+{
+ return pai_add(event, flags);
+}
+
+static void paiext_stop(struct perf_event *event, int flags)
+{
+ pai_stop(event, flags);
+}
+
+static void paiext_del(struct perf_event *event, int flags)
+{
+ pai_del(event, flags);
+}
+
+/* Called on schedule-in and schedule-out. No access to event structure,
+ * but for sampling only event NNPA_ALL is allowed.
+ */
+static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
+{
+	/* We started with a clean page at event installation. So read out
+	 * the results on schedule-out and, if the page was dirty, save the
+	 * old values.
+	 */
+ if (!sched_in)
+ pai_have_samples(PAI_PMU_EXT);
+}
+
+/* Attribute definitions for paicrypt interface. As with other CPU
+ * Measurement Facilities, there is one attribute per mapped counter.
+ * The number of mapped counters may vary per machine generation. Use
+ * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
+ * to determine the number of mapped counters. The instruction returns
+ * a positive number, which is the highest number of supported counters.
+ * All counters less than this number are also supported, there are no
+ * holes. A returned number of zero means no support for mapped counters.
+ *
+ * The identification of the counter is a unique number. The chosen range
+ * is 0x1000 + offset in mapped kernel page.
+ * All CPU Measurement Facility counter identifiers must be unique and
+ * the numbers from 0 to 496 are already used for the CPU Measurement
+ * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
+ * used for the CPU Measurement Sampling facility.
+ */
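+/* Example (illustrative): with the 0x1000 base described above, the sysfs
+ * event KM_DEA is exported with id 0x1001 and CRYPTO_ALL with id 0x1000,
+ * so, assuming a perf tool that resolves sysfs event names, a counting
+ * session could look like
+ *	perf stat -e pai_crypto/KM_DEA/ -a -- sleep 1
+ * (the exact perf invocation is an assumption, not mandated by this
+ * driver).
+ */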
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *paicrypt_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group paicrypt_events_group = {
+ .name = "events",
+ .attrs = NULL /* Filled in attr_event_init() */
+};
+
+static struct attribute_group paicrypt_format_group = {
+ .name = "format",
+ .attrs = paicrypt_format_attr,
+};
+
+static const struct attribute_group *paicrypt_attr_groups[] = {
+ &paicrypt_events_group,
+ &paicrypt_format_group,
+ NULL,
+};
+
+/* Performance monitoring unit for mapped counters */
+static struct pmu paicrypt = {
+ .task_ctx_nr = perf_hw_context,
+ .event_init = paicrypt_event_init,
+ .add = paicrypt_add,
+ .del = paicrypt_del,
+ .start = paicrypt_start,
+ .stop = paicrypt_stop,
+ .read = paicrypt_read,
+ .sched_task = paicrypt_sched_task,
+ .attr_groups = paicrypt_attr_groups
+};
+
+/* List of symbolic PAI crypto counter names. */
+static const char * const paicrypt_ctrnames[] = {
+ [0] = "CRYPTO_ALL",
+ [1] = "KM_DEA",
+ [2] = "KM_TDEA_128",
+ [3] = "KM_TDEA_192",
+ [4] = "KM_ENCRYPTED_DEA",
+ [5] = "KM_ENCRYPTED_TDEA_128",
+ [6] = "KM_ENCRYPTED_TDEA_192",
+ [7] = "KM_AES_128",
+ [8] = "KM_AES_192",
+ [9] = "KM_AES_256",
+ [10] = "KM_ENCRYPTED_AES_128",
+ [11] = "KM_ENCRYPTED_AES_192",
+ [12] = "KM_ENCRYPTED_AES_256",
+ [13] = "KM_XTS_AES_128",
+ [14] = "KM_XTS_AES_256",
+ [15] = "KM_XTS_ENCRYPTED_AES_128",
+ [16] = "KM_XTS_ENCRYPTED_AES_256",
+ [17] = "KMC_DEA",
+ [18] = "KMC_TDEA_128",
+ [19] = "KMC_TDEA_192",
+ [20] = "KMC_ENCRYPTED_DEA",
+ [21] = "KMC_ENCRYPTED_TDEA_128",
+ [22] = "KMC_ENCRYPTED_TDEA_192",
+ [23] = "KMC_AES_128",
+ [24] = "KMC_AES_192",
+ [25] = "KMC_AES_256",
+ [26] = "KMC_ENCRYPTED_AES_128",
+ [27] = "KMC_ENCRYPTED_AES_192",
+ [28] = "KMC_ENCRYPTED_AES_256",
+ [29] = "KMC_PRNG",
+ [30] = "KMA_GCM_AES_128",
+ [31] = "KMA_GCM_AES_192",
+ [32] = "KMA_GCM_AES_256",
+ [33] = "KMA_GCM_ENCRYPTED_AES_128",
+ [34] = "KMA_GCM_ENCRYPTED_AES_192",
+ [35] = "KMA_GCM_ENCRYPTED_AES_256",
+ [36] = "KMF_DEA",
+ [37] = "KMF_TDEA_128",
+ [38] = "KMF_TDEA_192",
+ [39] = "KMF_ENCRYPTED_DEA",
+ [40] = "KMF_ENCRYPTED_TDEA_128",
+ [41] = "KMF_ENCRYPTED_TDEA_192",
+ [42] = "KMF_AES_128",
+ [43] = "KMF_AES_192",
+ [44] = "KMF_AES_256",
+ [45] = "KMF_ENCRYPTED_AES_128",
+ [46] = "KMF_ENCRYPTED_AES_192",
+ [47] = "KMF_ENCRYPTED_AES_256",
+ [48] = "KMCTR_DEA",
+ [49] = "KMCTR_TDEA_128",
+ [50] = "KMCTR_TDEA_192",
+ [51] = "KMCTR_ENCRYPTED_DEA",
+ [52] = "KMCTR_ENCRYPTED_TDEA_128",
+ [53] = "KMCTR_ENCRYPTED_TDEA_192",
+ [54] = "KMCTR_AES_128",
+ [55] = "KMCTR_AES_192",
+ [56] = "KMCTR_AES_256",
+ [57] = "KMCTR_ENCRYPTED_AES_128",
+ [58] = "KMCTR_ENCRYPTED_AES_192",
+ [59] = "KMCTR_ENCRYPTED_AES_256",
+ [60] = "KMO_DEA",
+ [61] = "KMO_TDEA_128",
+ [62] = "KMO_TDEA_192",
+ [63] = "KMO_ENCRYPTED_DEA",
+ [64] = "KMO_ENCRYPTED_TDEA_128",
+ [65] = "KMO_ENCRYPTED_TDEA_192",
+ [66] = "KMO_AES_128",
+ [67] = "KMO_AES_192",
+ [68] = "KMO_AES_256",
+ [69] = "KMO_ENCRYPTED_AES_128",
+ [70] = "KMO_ENCRYPTED_AES_192",
+ [71] = "KMO_ENCRYPTED_AES_256",
+ [72] = "KIMD_SHA_1",
+ [73] = "KIMD_SHA_256",
+ [74] = "KIMD_SHA_512",
+ [75] = "KIMD_SHA3_224",
+ [76] = "KIMD_SHA3_256",
+ [77] = "KIMD_SHA3_384",
+ [78] = "KIMD_SHA3_512",
+ [79] = "KIMD_SHAKE_128",
+ [80] = "KIMD_SHAKE_256",
+ [81] = "KIMD_GHASH",
+ [82] = "KLMD_SHA_1",
+ [83] = "KLMD_SHA_256",
+ [84] = "KLMD_SHA_512",
+ [85] = "KLMD_SHA3_224",
+ [86] = "KLMD_SHA3_256",
+ [87] = "KLMD_SHA3_384",
+ [88] = "KLMD_SHA3_512",
+ [89] = "KLMD_SHAKE_128",
+ [90] = "KLMD_SHAKE_256",
+ [91] = "KMAC_DEA",
+ [92] = "KMAC_TDEA_128",
+ [93] = "KMAC_TDEA_192",
+ [94] = "KMAC_ENCRYPTED_DEA",
+ [95] = "KMAC_ENCRYPTED_TDEA_128",
+ [96] = "KMAC_ENCRYPTED_TDEA_192",
+ [97] = "KMAC_AES_128",
+ [98] = "KMAC_AES_192",
+ [99] = "KMAC_AES_256",
+ [100] = "KMAC_ENCRYPTED_AES_128",
+ [101] = "KMAC_ENCRYPTED_AES_192",
+ [102] = "KMAC_ENCRYPTED_AES_256",
+ [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
+ [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
+ [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
+ [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
+ [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
+ [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
+ [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
+ [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
+ [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
+ [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
+ [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
+ [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
+ [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
+ [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
+ [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
+ [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
+ [119] = "PCC_SCALAR_MULTIPLY_P256",
+ [120] = "PCC_SCALAR_MULTIPLY_P384",
+ [121] = "PCC_SCALAR_MULTIPLY_P521",
+ [122] = "PCC_SCALAR_MULTIPLY_ED25519",
+ [123] = "PCC_SCALAR_MULTIPLY_ED448",
+ [124] = "PCC_SCALAR_MULTIPLY_X25519",
+ [125] = "PCC_SCALAR_MULTIPLY_X448",
+ [126] = "PRNO_SHA_512_DRNG",
+ [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
+ [128] = "PRNO_TRNG",
+ [129] = "KDSA_ECDSA_VERIFY_P256",
+ [130] = "KDSA_ECDSA_VERIFY_P384",
+ [131] = "KDSA_ECDSA_VERIFY_P521",
+ [132] = "KDSA_ECDSA_SIGN_P256",
+ [133] = "KDSA_ECDSA_SIGN_P384",
+ [134] = "KDSA_ECDSA_SIGN_P521",
+ [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
+ [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
+ [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
+ [138] = "KDSA_EDDSA_VERIFY_ED25519",
+ [139] = "KDSA_EDDSA_VERIFY_ED448",
+ [140] = "KDSA_EDDSA_SIGN_ED25519",
+ [141] = "KDSA_EDDSA_SIGN_ED448",
+ [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
+ [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
+ [144] = "PCKMO_ENCRYPT_DEA_KEY",
+ [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
+ [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
+ [147] = "PCKMO_ENCRYPT_AES_128_KEY",
+ [148] = "PCKMO_ENCRYPT_AES_192_KEY",
+ [149] = "PCKMO_ENCRYPT_AES_256_KEY",
+ [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
+ [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
+ [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
+ [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
+ [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
+ [155] = "IBM_RESERVED_155",
+ [156] = "IBM_RESERVED_156",
+ [157] = "KM_FULL_XTS_AES_128",
+ [158] = "KM_FULL_XTS_AES_256",
+ [159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
+ [160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
+ [161] = "KMAC_HMAC_SHA_224",
+ [162] = "KMAC_HMAC_SHA_256",
+ [163] = "KMAC_HMAC_SHA_384",
+ [164] = "KMAC_HMAC_SHA_512",
+ [165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
+ [166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
+ [167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
+ [168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
+ [169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
+ [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
+ [171] = "PCKMO_ENCRYPT_AES_XTS_128",
+ [172] = "PCKMO_ENCRYPT_AES_XTS_256",
+};
+
+static struct attribute *paiext_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group paiext_events_group = {
+ .name = "events",
+ .attrs = NULL, /* Filled in attr_event_init() */
+};
+
+static struct attribute_group paiext_format_group = {
+ .name = "format",
+ .attrs = paiext_format_attr,
+};
+
+static const struct attribute_group *paiext_attr_groups[] = {
+ &paiext_events_group,
+ &paiext_format_group,
+ NULL,
+};
+
+/* Performance monitoring unit for mapped counters */
+static struct pmu paiext = {
+ .task_ctx_nr = perf_hw_context,
+ .event_init = paiext_event_init,
+ .add = paiext_add,
+ .del = paiext_del,
+ .start = paiext_start,
+ .stop = paiext_stop,
+ .read = paiext_read,
+ .sched_task = paiext_sched_task,
+ .attr_groups = paiext_attr_groups,
+};
+
+/* List of symbolic PAI extension 1 NNPA counter names. */
+static const char * const paiext_ctrnames[] = {
+ [0] = "NNPA_ALL",
+ [1] = "NNPA_ADD",
+ [2] = "NNPA_SUB",
+ [3] = "NNPA_MUL",
+ [4] = "NNPA_DIV",
+ [5] = "NNPA_MIN",
+ [6] = "NNPA_MAX",
+ [7] = "NNPA_LOG",
+ [8] = "NNPA_EXP",
+ [9] = "NNPA_IBM_RESERVED_9",
+ [10] = "NNPA_RELU",
+ [11] = "NNPA_TANH",
+ [12] = "NNPA_SIGMOID",
+ [13] = "NNPA_SOFTMAX",
+ [14] = "NNPA_BATCHNORM",
+ [15] = "NNPA_MAXPOOL2D",
+ [16] = "NNPA_AVGPOOL2D",
+ [17] = "NNPA_LSTMACT",
+ [18] = "NNPA_GRUACT",
+ [19] = "NNPA_CONVOLUTION",
+ [20] = "NNPA_MATMUL_OP",
+ [21] = "NNPA_MATMUL_OP_BCAST23",
+ [22] = "NNPA_SMALLBATCH",
+ [23] = "NNPA_LARGEDIM",
+ [24] = "NNPA_SMALLTENSOR",
+ [25] = "NNPA_1MFRAME",
+ [26] = "NNPA_2GFRAME",
+ [27] = "NNPA_ACCESSEXCEPT",
+ [28] = "NNPA_TRANSFORM",
+ [29] = "NNPA_GELU",
+ [30] = "NNPA_MOMENTS",
+ [31] = "NNPA_LAYERNORM",
+ [32] = "NNPA_MATMUL_OP_BCAST1",
+ [33] = "NNPA_SQRT",
+ [34] = "NNPA_INVSQRT",
+ [35] = "NNPA_NORM",
+ [36] = "NNPA_REDUCE",
+};
+
+static void __init attr_event_free(struct attribute **attrs)
+{
+ struct perf_pmu_events_attr *pa;
+ unsigned int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dap;
+
+ dap = container_of(attrs[i], struct device_attribute, attr);
+ pa = container_of(dap, struct perf_pmu_events_attr, attr);
+ kfree(pa);
+ }
+ kfree(attrs);
+}
+
+static struct attribute * __init attr_event_init_one(int num,
+ unsigned long base,
+ const char *name)
+{
+ struct perf_pmu_events_attr *pa;
+
+ pa = kzalloc(sizeof(*pa), GFP_KERNEL);
+ if (!pa)
+ return NULL;
+
+ sysfs_attr_init(&pa->attr.attr);
+ pa->id = base + num;
+ pa->attr.attr.name = name;
+ pa->attr.attr.mode = 0444;
+ pa->attr.show = cpumf_events_sysfs_show;
+ pa->attr.store = NULL;
+ return &pa->attr.attr;
+}
+
+static struct attribute ** __init attr_event_init(struct pai_pmu *p)
+{
+ unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail);
+ struct attribute **attrs;
+ unsigned int i;
+
+ attrs = kmalloc_array(min_attr + 1, sizeof(*attrs), GFP_KERNEL | __GFP_ZERO);
+ if (!attrs)
+ goto out;
+ for (i = 0; i < min_attr; i++) {
+ attrs[i] = attr_event_init_one(i, p->base, p->names[i]);
+ if (!attrs[i]) {
+ attr_event_free(attrs);
+ attrs = NULL;
+ goto out;
+ }
+ }
+ attrs[i] = NULL;
+out:
+ return attrs;
+}
+
+static void __init pai_pmu_exit(struct pai_pmu *p)
+{
+ attr_event_free(p->event_group->attrs);
+ p->event_group->attrs = NULL;
+}
+
+/* Add a PMU. Install its events and register the PMU device driver
+ * callback functions.
+ */
+static int __init pai_pmu_init(struct pai_pmu *p)
+{
+ int rc = -ENOMEM;
+
+ /* Export known PAI events */
+ p->event_group->attrs = attr_event_init(p);
+ if (!p->event_group->attrs) {
+ pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname);
+ goto out;
+ }
+
+ rc = perf_pmu_register(p->pmu, p->pmuname, -1);
+ if (rc) {
+ pai_pmu_exit(p);
+ pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname,
+ rc);
+ }
+out:
+ return rc;
+}
+
+/* PAI PMU characteristics table */
+static struct pai_pmu pai_pmu[] __refdata = {
+ [PAI_PMU_CRYPTO] = {
+ .pmuname = "pai_crypto",
+ .facility_nr = 196,
+ .num_named = ARRAY_SIZE(paicrypt_ctrnames),
+ .names = paicrypt_ctrnames,
+ .base = PAI_CRYPTO_BASE,
+ .kernel_offset = PAI_CRYPTO_KERNEL_OFFSET,
+ .area_size = PAGE_SIZE,
+ .init = pai_pmu_init,
+ .exit = pai_pmu_exit,
+ .pmu = &paicrypt,
+ .event_group = &paicrypt_events_group
+ },
+ [PAI_PMU_EXT] = {
+ .pmuname = "pai_ext",
+ .facility_nr = 197,
+ .num_named = ARRAY_SIZE(paiext_ctrnames),
+ .names = paiext_ctrnames,
+ .base = PAI_NNPA_BASE,
+ .kernel_offset = 0,
+ .area_size = PAIE1_CTRBLOCK_SZ,
+ .init = pai_pmu_init,
+ .exit = pai_pmu_exit,
+ .pmu = &paiext,
+ .event_group = &paiext_events_group
+ }
+};
+
+/*
+ * Check if the PMU (via its facility number) is supported by the machine.
+ * Try all of the supported PAI PMUs.
+ * Return number of successfully installed PMUs.
+ */
+static int __init paipmu_setup(void)
+{
+ struct qpaci_info_block ib;
+ int install_ok = 0, rc;
+ struct pai_pmu *p;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) {
+ p = &pai_pmu[i];
+
+ if (!test_facility(p->facility_nr))
+ continue;
+
+ qpaci(&ib);
+ switch (i) {
+ case PAI_PMU_CRYPTO:
+ p->num_avail = ib.num_cc;
+ if (p->num_avail >= PAI_CRYPTO_MAXCTR) {
+ pr_err("Too many PMU %s counters %d\n",
+ p->pmuname, p->num_avail);
+ continue;
+ }
+ break;
+ case PAI_PMU_EXT:
+ p->num_avail = ib.num_nnpa;
+ break;
+ }
+ p->num_avail += 1; /* Add xxx_ALL event */
+ if (p->init) {
+ rc = p->init(p);
+ if (!rc)
+ ++install_ok;
+ }
+ }
+ return install_ok;
+}
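+
+/* Worked example (the numbers depend on the machine generation): if QPACI
+ * reports ib.num_cc = 172 for the crypto facility, num_avail becomes 173
+ * after adding the CRYPTO_ALL base event, and attr_event_init() exports at
+ * most min(num_named, num_avail) counter names to sysfs.
+ */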
+
+static int __init pai_init(void)
+{
+ /* Setup s390dbf facility */
+ paidbg = debug_register("pai", 32, 256, 128);
+ if (!paidbg) {
+ pr_err("Registration of s390dbf pai failed\n");
+ return -ENOMEM;
+ }
+ debug_register_view(paidbg, &debug_sprintf_view);
+
+ if (!paipmu_setup()) {
+ /* No PMU registration, no need for debug buffer */
+ debug_unregister_view(paidbg, &debug_sprintf_view);
+ debug_unregister(paidbg);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+device_initcall(pai_init);
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
deleted file mode 100644
index 62bf8a15bf32..000000000000
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ /dev/null
@@ -1,843 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Performance event support - Processor Activity Instrumentation Facility
- *
- * Copyright IBM Corp. 2022
- * Author(s): Thomas Richter <tmricht@linux.ibm.com>
- */
-#define KMSG_COMPONENT "pai_crypto"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/perf_event.h>
-#include <asm/ctlreg.h>
-#include <asm/pai.h>
-#include <asm/debug.h>
-
-static debug_info_t *cfm_dbg;
-static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */
- /* extracted with QPACI instruction */
-
-DEFINE_STATIC_KEY_FALSE(pai_key);
-
-struct pai_userdata {
- u16 num;
- u64 value;
-} __packed;
-
-struct paicrypt_map {
- unsigned long *page; /* Page for CPU to store counters */
- struct pai_userdata *save; /* Page to store no-zero counters */
- unsigned int active_events; /* # of PAI crypto users */
- refcount_t refcnt; /* Reference count mapped buffers */
- struct perf_event *event; /* Perf event for sampling */
- struct list_head syswide_list; /* List system-wide sampling events */
-};
-
-struct paicrypt_mapptr {
- struct paicrypt_map *mapptr;
-};
-
-static struct paicrypt_root { /* Anchor to per CPU data */
- refcount_t refcnt; /* Overall active events */
- struct paicrypt_mapptr __percpu *mapptr;
-} paicrypt_root;
-
-/* Free per CPU data when the last event is removed. */
-static void paicrypt_root_free(void)
-{
- if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
- free_percpu(paicrypt_root.mapptr);
- paicrypt_root.mapptr = NULL;
- }
- debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
- refcount_read(&paicrypt_root.refcnt));
-}
-
-/*
- * On initialization of first event also allocate per CPU data dynamically.
- * Start with an array of pointers, the array size is the maximum number of
- * CPUs possible, which might be larger than the number of CPUs currently
- * online.
- */
-static int paicrypt_root_alloc(void)
-{
- if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
- /* The memory is already zeroed. */
- paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
- if (!paicrypt_root.mapptr)
- return -ENOMEM;
- refcount_set(&paicrypt_root.refcnt, 1);
- }
- return 0;
-}
-
-/* Release the PMU if event is the last perf event */
-static DEFINE_MUTEX(pai_reserve_mutex);
-
-/* Free all memory allocated for event counting/sampling setup */
-static void paicrypt_free(struct paicrypt_mapptr *mp)
-{
- free_page((unsigned long)mp->mapptr->page);
- kvfree(mp->mapptr->save);
- kfree(mp->mapptr);
- mp->mapptr = NULL;
-}
-
-/* Adjust usage counters and remove allocated memory when all users are
- * gone.
- */
-static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
-{
- struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
- struct paicrypt_map *cpump = mp->mapptr;
-
- mutex_lock(&pai_reserve_mutex);
- debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
- "refcnt %u\n", __func__, event->attr.config,
- event->cpu, cpump->active_events,
- refcount_read(&cpump->refcnt));
- if (refcount_dec_and_test(&cpump->refcnt))
- paicrypt_free(mp);
- paicrypt_root_free();
- mutex_unlock(&pai_reserve_mutex);
-}
-
-static void paicrypt_event_destroy(struct perf_event *event)
-{
- int cpu;
-
- static_branch_dec(&pai_key);
- free_page(PAI_SAVE_AREA(event));
- if (event->cpu == -1) {
- struct cpumask *mask = PAI_CPU_MASK(event);
-
- for_each_cpu(cpu, mask)
- paicrypt_event_destroy_cpu(event, cpu);
- kfree(mask);
- } else {
- paicrypt_event_destroy_cpu(event, event->cpu);
- }
-}
-
-static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
-{
- if (kernel)
- nr += PAI_CRYPTO_MAXCTR;
- return page[nr];
-}
-
-/* Read the counter values. Return value from location in CMP. For event
- * CRYPTO_ALL sum up all events.
- */
-static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
- u64 sum = 0;
- int i;
-
- if (event->attr.config != PAI_CRYPTO_BASE) {
- return paicrypt_getctr(cpump->page,
- event->attr.config - PAI_CRYPTO_BASE,
- kernel);
- }
-
- for (i = 1; i <= paicrypt_cnt; i++) {
- u64 val = paicrypt_getctr(cpump->page, i, kernel);
-
- if (!val)
- continue;
- sum += val;
- }
- return sum;
-}
-
-static u64 paicrypt_getall(struct perf_event *event)
-{
- u64 sum = 0;
-
- if (!event->attr.exclude_kernel)
- sum += paicrypt_getdata(event, true);
- if (!event->attr.exclude_user)
- sum += paicrypt_getdata(event, false);
-
- return sum;
-}
-
-/* Check concurrent access of counting and sampling for crypto events.
- * This function is called in process context and it is save to block.
- * When the event initialization functions fails, no other call back will
- * be invoked.
- *
- * Allocate the memory for the event.
- */
-static int paicrypt_alloc_cpu(struct perf_event *event, int cpu)
-{
- struct paicrypt_map *cpump = NULL;
- struct paicrypt_mapptr *mp;
- int rc;
-
- mutex_lock(&pai_reserve_mutex);
- /* Allocate root node */
- rc = paicrypt_root_alloc();
- if (rc)
- goto unlock;
-
- /* Allocate node for this event */
- mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
- cpump = mp->mapptr;
- if (!cpump) { /* Paicrypt_map allocated? */
- rc = -ENOMEM;
- cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
- if (!cpump)
- goto undo;
- /* Allocate memory for counter page and counter extraction.
- * Only the first counting event has to allocate a page.
- */
- mp->mapptr = cpump;
- cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- cpump->save = kvmalloc_array(paicrypt_cnt + 1,
- sizeof(struct pai_userdata),
- GFP_KERNEL);
- if (!cpump->page || !cpump->save) {
- paicrypt_free(mp);
- goto undo;
- }
- INIT_LIST_HEAD(&cpump->syswide_list);
- refcount_set(&cpump->refcnt, 1);
- rc = 0;
- } else {
- refcount_inc(&cpump->refcnt);
- }
-
-undo:
- if (rc) {
- /* Error in allocation of event, decrement anchor. Since
- * the event in not created, its destroy() function is never
- * invoked. Adjust the reference counter for the anchor.
- */
- paicrypt_root_free();
- }
-unlock:
- mutex_unlock(&pai_reserve_mutex);
- return rc;
-}
-
-static int paicrypt_alloc(struct perf_event *event)
-{
- struct cpumask *maskptr;
- int cpu, rc = -ENOMEM;
-
- maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
- if (!maskptr)
- goto out;
-
- for_each_online_cpu(cpu) {
- rc = paicrypt_alloc_cpu(event, cpu);
- if (rc) {
- for_each_cpu(cpu, maskptr)
- paicrypt_event_destroy_cpu(event, cpu);
- kfree(maskptr);
- goto out;
- }
- cpumask_set_cpu(cpu, maskptr);
- }
-
- /*
- * On error all cpumask are freed and all events have been destroyed.
- * Save of which CPUs data structures have been allocated for.
- * Release them in paicrypt_event_destroy call back function
- * for this event.
- */
- PAI_CPU_MASK(event) = maskptr;
- rc = 0;
-out:
- return rc;
-}
-
-/* Might be called on different CPU than the one the event is intended for. */
-static int paicrypt_event_init(struct perf_event *event)
-{
- struct perf_event_attr *a = &event->attr;
- int rc = 0;
-
- /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
- if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
- return -ENOENT;
- /* PAI crypto event must be in valid range, try others if not */
- if (a->config < PAI_CRYPTO_BASE ||
- a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
- return -ENOENT;
- /* Allow only CRYPTO_ALL for sampling */
- if (a->sample_period && a->config != PAI_CRYPTO_BASE)
- return -EINVAL;
- /* Get a page to store last counter values for sampling */
- if (a->sample_period) {
- PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
- if (!PAI_SAVE_AREA(event)) {
- rc = -ENOMEM;
- goto out;
- }
- }
-
- if (event->cpu >= 0)
- rc = paicrypt_alloc_cpu(event, event->cpu);
- else
- rc = paicrypt_alloc(event);
- if (rc) {
- free_page(PAI_SAVE_AREA(event));
- goto out;
- }
- event->destroy = paicrypt_event_destroy;
-
- if (a->sample_period) {
- a->sample_period = 1;
- a->freq = 0;
- /* Register for paicrypt_sched_task() to be called */
- event->attach_state |= PERF_ATTACH_SCHED_CB;
- /* Add raw data which contain the memory mapped counters */
- a->sample_type |= PERF_SAMPLE_RAW;
- /* Turn off inheritance */
- a->inherit = 0;
- }
-
- static_branch_inc(&pai_key);
-out:
- return rc;
-}
-
-static void paicrypt_read(struct perf_event *event)
-{
- u64 prev, new, delta;
-
- prev = local64_read(&event->hw.prev_count);
- new = paicrypt_getall(event);
- local64_set(&event->hw.prev_count, new);
- delta = (prev <= new) ? new - prev
- : (-1ULL - prev) + new + 1; /* overflow */
- local64_add(delta, &event->count);
-}
-
-static void paicrypt_start(struct perf_event *event, int flags)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
- u64 sum;
-
- if (!event->attr.sample_period) { /* Counting */
- sum = paicrypt_getall(event); /* Get current value */
- local64_set(&event->hw.prev_count, sum);
- } else { /* Sampling */
- memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
- /* Enable context switch callback for system-wide sampling */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
- perf_sched_cb_inc(event->pmu);
- } else {
- cpump->event = event;
- }
- }
-}
-
-static int paicrypt_add(struct perf_event *event, int flags)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
- unsigned long ccd;
-
- if (++cpump->active_events == 1) {
- ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
- WRITE_ONCE(get_lowcore()->ccd, ccd);
- local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
- }
- if (flags & PERF_EF_START)
- paicrypt_start(event, PERF_EF_RELOAD);
- event->hw.state = 0;
- return 0;
-}
-
-static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
-static void paicrypt_stop(struct perf_event *event, int flags)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
-
- if (!event->attr.sample_period) { /* Counting */
- paicrypt_read(event);
- } else { /* Sampling */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- perf_sched_cb_dec(event->pmu);
- list_del(PAI_SWLIST(event));
- } else {
- paicrypt_have_sample(event, cpump);
- cpump->event = NULL;
- }
- }
- event->hw.state = PERF_HES_STOPPED;
-}
-
-static void paicrypt_del(struct perf_event *event, int flags)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
-
- paicrypt_stop(event, PERF_EF_UPDATE);
- if (--cpump->active_events == 0) {
- local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
- WRITE_ONCE(get_lowcore()->ccd, 0);
- }
-}
-
-/* Create raw data and save it in buffer. Calculate the delta for each
- * counter between this invocation and the last invocation.
- * Returns number of bytes copied.
- * Saves only entries with positive counter difference of the form
- * 2 bytes: Number of counter
- * 8 bytes: Value of counter
- */
-static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
- unsigned long *page_old, bool exclude_user,
- bool exclude_kernel)
-{
- int i, outidx = 0;
-
- for (i = 1; i <= paicrypt_cnt; i++) {
- u64 val = 0, val_old = 0;
-
- if (!exclude_kernel) {
- val += paicrypt_getctr(page, i, true);
- val_old += paicrypt_getctr(page_old, i, true);
- }
- if (!exclude_user) {
- val += paicrypt_getctr(page, i, false);
- val_old += paicrypt_getctr(page_old, i, false);
- }
- if (val >= val_old)
- val -= val_old;
- else
- val = (~0ULL - val_old) + val + 1;
- if (val) {
- userdata[outidx].num = i;
- userdata[outidx].value = val;
- outidx++;
- }
- }
- return outidx * sizeof(struct pai_userdata);
-}
-
-static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
- struct perf_event *event)
-{
- struct perf_sample_data data;
- struct perf_raw_record raw;
- struct pt_regs regs;
- int overflow;
-
- /* Setup perf sample */
- memset(&regs, 0, sizeof(regs));
- memset(&raw, 0, sizeof(raw));
- memset(&data, 0, sizeof(data));
- perf_sample_data_init(&data, 0, event->hw.last_period);
- if (event->attr.sample_type & PERF_SAMPLE_TID) {
- data.tid_entry.pid = task_tgid_nr(current);
- data.tid_entry.tid = task_pid_nr(current);
- }
- if (event->attr.sample_type & PERF_SAMPLE_TIME)
- data.time = event->clock();
- if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
- data.id = event->id;
- if (event->attr.sample_type & PERF_SAMPLE_CPU) {
- data.cpu_entry.cpu = smp_processor_id();
- data.cpu_entry.reserved = 0;
- }
- if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- raw.frag.size = rawsize;
- raw.frag.data = cpump->save;
- perf_sample_save_raw_data(&data, event, &raw);
- }
-
- overflow = perf_event_overflow(event, &data, &regs);
- perf_event_update_userpage(event);
- /* Save crypto counter lowcore page after reading event data. */
- memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
- return overflow;
-}
-
-/* Check if there is data to be saved on schedule out of a task. */
-static void paicrypt_have_sample(struct perf_event *event,
- struct paicrypt_map *cpump)
-{
- size_t rawsize;
-
- if (!event) /* No event active */
- return;
- rawsize = paicrypt_copy(cpump->save, cpump->page,
- (unsigned long *)PAI_SAVE_AREA(event),
- event->attr.exclude_user,
- event->attr.exclude_kernel);
- if (rawsize) /* No incremented counters */
- paicrypt_push_sample(rawsize, cpump, event);
-}
-
-/* Check if there is data to be saved on schedule out of a task. */
-static void paicrypt_have_samples(void)
-{
- struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
- struct paicrypt_map *cpump = mp->mapptr;
- struct perf_event *event;
-
- list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
- paicrypt_have_sample(event, cpump);
-}
-
-/* Called on schedule-in and schedule-out. No access to event structure,
- * but for sampling only event CRYPTO_ALL is allowed.
- */
-static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
- struct task_struct *task, bool sched_in)
-{
- /* We started with a clean page on event installation. So read out
- * results on schedule_out and if page was dirty, save old values.
- */
- if (!sched_in)
- paicrypt_have_samples();
-}
-
-/* Attribute definitions for paicrypt interface. As with other CPU
- * Measurement Facilities, there is one attribute per mapped counter.
- * The number of mapped counters may vary per machine generation. Use
- * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
- * to determine the number of mapped counters. The instructions returns
- * a positive number, which is the highest number of supported counters.
- * All counters less than this number are also supported, there are no
- * holes. A returned number of zero means no support for mapped counters.
- *
- * The identification of the counter is a unique number. The chosen range
- * is 0x1000 + offset in mapped kernel page.
- * All CPU Measurement Facility counters identifiers must be unique and
- * the numbers from 0 to 496 are already used for the CPU Measurement
- * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
- * used for the CPU Measurement Sampling facility.
- */
-PMU_FORMAT_ATTR(event, "config:0-63");
-
-static struct attribute *paicrypt_format_attr[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group paicrypt_events_group = {
- .name = "events",
- .attrs = NULL /* Filled in attr_event_init() */
-};
-
-static struct attribute_group paicrypt_format_group = {
- .name = "format",
- .attrs = paicrypt_format_attr,
-};
-
-static const struct attribute_group *paicrypt_attr_groups[] = {
- &paicrypt_events_group,
- &paicrypt_format_group,
- NULL,
-};
-
-/* Performance monitoring unit for mapped counters */
-static struct pmu paicrypt = {
- .task_ctx_nr = perf_hw_context,
- .event_init = paicrypt_event_init,
- .add = paicrypt_add,
- .del = paicrypt_del,
- .start = paicrypt_start,
- .stop = paicrypt_stop,
- .read = paicrypt_read,
- .sched_task = paicrypt_sched_task,
- .attr_groups = paicrypt_attr_groups
-};
-
-/* List of symbolic PAI counter names. */
-static const char * const paicrypt_ctrnames[] = {
- [0] = "CRYPTO_ALL",
- [1] = "KM_DEA",
- [2] = "KM_TDEA_128",
- [3] = "KM_TDEA_192",
- [4] = "KM_ENCRYPTED_DEA",
- [5] = "KM_ENCRYPTED_TDEA_128",
- [6] = "KM_ENCRYPTED_TDEA_192",
- [7] = "KM_AES_128",
- [8] = "KM_AES_192",
- [9] = "KM_AES_256",
- [10] = "KM_ENCRYPTED_AES_128",
- [11] = "KM_ENCRYPTED_AES_192",
- [12] = "KM_ENCRYPTED_AES_256",
- [13] = "KM_XTS_AES_128",
- [14] = "KM_XTS_AES_256",
- [15] = "KM_XTS_ENCRYPTED_AES_128",
- [16] = "KM_XTS_ENCRYPTED_AES_256",
- [17] = "KMC_DEA",
- [18] = "KMC_TDEA_128",
- [19] = "KMC_TDEA_192",
- [20] = "KMC_ENCRYPTED_DEA",
- [21] = "KMC_ENCRYPTED_TDEA_128",
- [22] = "KMC_ENCRYPTED_TDEA_192",
- [23] = "KMC_AES_128",
- [24] = "KMC_AES_192",
- [25] = "KMC_AES_256",
- [26] = "KMC_ENCRYPTED_AES_128",
- [27] = "KMC_ENCRYPTED_AES_192",
- [28] = "KMC_ENCRYPTED_AES_256",
- [29] = "KMC_PRNG",
- [30] = "KMA_GCM_AES_128",
- [31] = "KMA_GCM_AES_192",
- [32] = "KMA_GCM_AES_256",
- [33] = "KMA_GCM_ENCRYPTED_AES_128",
- [34] = "KMA_GCM_ENCRYPTED_AES_192",
- [35] = "KMA_GCM_ENCRYPTED_AES_256",
- [36] = "KMF_DEA",
- [37] = "KMF_TDEA_128",
- [38] = "KMF_TDEA_192",
- [39] = "KMF_ENCRYPTED_DEA",
- [40] = "KMF_ENCRYPTED_TDEA_128",
- [41] = "KMF_ENCRYPTED_TDEA_192",
- [42] = "KMF_AES_128",
- [43] = "KMF_AES_192",
- [44] = "KMF_AES_256",
- [45] = "KMF_ENCRYPTED_AES_128",
- [46] = "KMF_ENCRYPTED_AES_192",
- [47] = "KMF_ENCRYPTED_AES_256",
- [48] = "KMCTR_DEA",
- [49] = "KMCTR_TDEA_128",
- [50] = "KMCTR_TDEA_192",
- [51] = "KMCTR_ENCRYPTED_DEA",
- [52] = "KMCTR_ENCRYPTED_TDEA_128",
- [53] = "KMCTR_ENCRYPTED_TDEA_192",
- [54] = "KMCTR_AES_128",
- [55] = "KMCTR_AES_192",
- [56] = "KMCTR_AES_256",
- [57] = "KMCTR_ENCRYPTED_AES_128",
- [58] = "KMCTR_ENCRYPTED_AES_192",
- [59] = "KMCTR_ENCRYPTED_AES_256",
- [60] = "KMO_DEA",
- [61] = "KMO_TDEA_128",
- [62] = "KMO_TDEA_192",
- [63] = "KMO_ENCRYPTED_DEA",
- [64] = "KMO_ENCRYPTED_TDEA_128",
- [65] = "KMO_ENCRYPTED_TDEA_192",
- [66] = "KMO_AES_128",
- [67] = "KMO_AES_192",
- [68] = "KMO_AES_256",
- [69] = "KMO_ENCRYPTED_AES_128",
- [70] = "KMO_ENCRYPTED_AES_192",
- [71] = "KMO_ENCRYPTED_AES_256",
- [72] = "KIMD_SHA_1",
- [73] = "KIMD_SHA_256",
- [74] = "KIMD_SHA_512",
- [75] = "KIMD_SHA3_224",
- [76] = "KIMD_SHA3_256",
- [77] = "KIMD_SHA3_384",
- [78] = "KIMD_SHA3_512",
- [79] = "KIMD_SHAKE_128",
- [80] = "KIMD_SHAKE_256",
- [81] = "KIMD_GHASH",
- [82] = "KLMD_SHA_1",
- [83] = "KLMD_SHA_256",
- [84] = "KLMD_SHA_512",
- [85] = "KLMD_SHA3_224",
- [86] = "KLMD_SHA3_256",
- [87] = "KLMD_SHA3_384",
- [88] = "KLMD_SHA3_512",
- [89] = "KLMD_SHAKE_128",
- [90] = "KLMD_SHAKE_256",
- [91] = "KMAC_DEA",
- [92] = "KMAC_TDEA_128",
- [93] = "KMAC_TDEA_192",
- [94] = "KMAC_ENCRYPTED_DEA",
- [95] = "KMAC_ENCRYPTED_TDEA_128",
- [96] = "KMAC_ENCRYPTED_TDEA_192",
- [97] = "KMAC_AES_128",
- [98] = "KMAC_AES_192",
- [99] = "KMAC_AES_256",
- [100] = "KMAC_ENCRYPTED_AES_128",
- [101] = "KMAC_ENCRYPTED_AES_192",
- [102] = "KMAC_ENCRYPTED_AES_256",
- [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
- [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
- [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
- [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
- [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
- [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
- [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
- [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
- [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
- [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
- [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
- [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
- [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
- [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
- [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
- [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
- [119] = "PCC_SCALAR_MULTIPLY_P256",
- [120] = "PCC_SCALAR_MULTIPLY_P384",
- [121] = "PCC_SCALAR_MULTIPLY_P521",
- [122] = "PCC_SCALAR_MULTIPLY_ED25519",
- [123] = "PCC_SCALAR_MULTIPLY_ED448",
- [124] = "PCC_SCALAR_MULTIPLY_X25519",
- [125] = "PCC_SCALAR_MULTIPLY_X448",
- [126] = "PRNO_SHA_512_DRNG",
- [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
- [128] = "PRNO_TRNG",
- [129] = "KDSA_ECDSA_VERIFY_P256",
- [130] = "KDSA_ECDSA_VERIFY_P384",
- [131] = "KDSA_ECDSA_VERIFY_P521",
- [132] = "KDSA_ECDSA_SIGN_P256",
- [133] = "KDSA_ECDSA_SIGN_P384",
- [134] = "KDSA_ECDSA_SIGN_P521",
- [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
- [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
- [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
- [138] = "KDSA_EDDSA_VERIFY_ED25519",
- [139] = "KDSA_EDDSA_VERIFY_ED448",
- [140] = "KDSA_EDDSA_SIGN_ED25519",
- [141] = "KDSA_EDDSA_SIGN_ED448",
- [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
- [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
- [144] = "PCKMO_ENCRYPT_DEA_KEY",
- [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
- [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
- [147] = "PCKMO_ENCRYPT_AES_128_KEY",
- [148] = "PCKMO_ENCRYPT_AES_192_KEY",
- [149] = "PCKMO_ENCRYPT_AES_256_KEY",
- [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
- [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
- [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
- [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
- [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
- [155] = "IBM_RESERVED_155",
- [156] = "IBM_RESERVED_156",
- [157] = "KM_FULL_XTS_AES_128",
- [158] = "KM_FULL_XTS_AES_256",
- [159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
- [160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
- [161] = "KMAC_HMAC_SHA_224",
- [162] = "KMAC_HMAC_SHA_256",
- [163] = "KMAC_HMAC_SHA_384",
- [164] = "KMAC_HMAC_SHA_512",
- [165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
- [166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
- [167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
- [168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
- [169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
- [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
- [171] = "PCKMO_ENCRYPT_AES_XTS_128",
- [172] = "PCKMO_ENCRYPT_AES_XTS_256",
-};
-
-static void __init attr_event_free(struct attribute **attrs, int num)
-{
- struct perf_pmu_events_attr *pa;
- int i;
-
- for (i = 0; i < num; i++) {
- struct device_attribute *dap;
-
- dap = container_of(attrs[i], struct device_attribute, attr);
- pa = container_of(dap, struct perf_pmu_events_attr, attr);
- kfree(pa);
- }
- kfree(attrs);
-}
-
-static int __init attr_event_init_one(struct attribute **attrs, int num)
-{
- struct perf_pmu_events_attr *pa;
-
- /* Index larger than array_size, no counter name available */
- if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
- attrs[num] = NULL;
- return 0;
- }
-
- pa = kzalloc(sizeof(*pa), GFP_KERNEL);
- if (!pa)
- return -ENOMEM;
-
- sysfs_attr_init(&pa->attr.attr);
- pa->id = PAI_CRYPTO_BASE + num;
- pa->attr.attr.name = paicrypt_ctrnames[num];
- pa->attr.attr.mode = 0444;
- pa->attr.show = cpumf_events_sysfs_show;
- pa->attr.store = NULL;
- attrs[num] = &pa->attr.attr;
- return 0;
-}
-
-/* Create PMU sysfs event attributes on the fly. */
-static int __init attr_event_init(void)
-{
- struct attribute **attrs;
- int ret, i;
-
- attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
- if (!attrs)
- return -ENOMEM;
- for (i = 0; i <= paicrypt_cnt; i++) {
- ret = attr_event_init_one(attrs, i);
- if (ret) {
- attr_event_free(attrs, i);
- return ret;
- }
- }
- attrs[i] = NULL;
- paicrypt_events_group.attrs = attrs;
- return 0;
-}
-
-static int __init paicrypt_init(void)
-{
- struct qpaci_info_block ib;
- int rc;
-
- if (!test_facility(196))
- return 0;
-
- qpaci(&ib);
- paicrypt_cnt = ib.num_cc;
- if (paicrypt_cnt == 0)
- return 0;
- if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
- pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
- return -E2BIG;
- }
-
- rc = attr_event_init(); /* Export known PAI crypto events */
- if (rc) {
- pr_err("Creation of PMU pai_crypto /sysfs failed\n");
- return rc;
- }
-
- /* Setup s390dbf facility */
- cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
- if (!cfm_dbg) {
- pr_err("Registration of s390dbf pai_crypto failed\n");
- return -ENOMEM;
- }
- debug_register_view(cfm_dbg, &debug_sprintf_view);
-
- rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
- if (rc) {
- pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
- rc);
- debug_unregister_view(cfm_dbg, &debug_sprintf_view);
- debug_unregister(cfm_dbg);
- return rc;
- }
- return 0;
-}
-
-device_initcall(paicrypt_init);
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
deleted file mode 100644
index 7b32935273ce..000000000000
--- a/arch/s390/kernel/perf_pai_ext.c
+++ /dev/null
@@ -1,756 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Performance event support - Processor Activity Instrumentation Extension
- * Facility
- *
- * Copyright IBM Corp. 2022
- * Author(s): Thomas Richter <tmricht@linux.ibm.com>
- */
-#define KMSG_COMPONENT "pai_ext"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/perf_event.h>
-#include <asm/ctlreg.h>
-#include <asm/pai.h>
-#include <asm/debug.h>
-
-#define PAIE1_CB_SZ 0x200 /* Size of PAIE1 control block */
-#define PAIE1_CTRBLOCK_SZ 0x400 /* Size of PAIE1 counter blocks */
-
-static debug_info_t *paiext_dbg;
-static unsigned int paiext_cnt; /* Extracted with QPACI instruction */
-
-struct pai_userdata {
- u16 num;
- u64 value;
-} __packed;
-
-/* Create the PAI extension 1 control block area.
- * The PAI extension control block 1 is pointed to by lowcore
- * address 0x1508 for each CPU. This control block is 512 bytes in size
- * and requires a 512 byte boundary alignment.
- */
-struct paiext_cb { /* PAI extension 1 control block */
- u64 header; /* Not used */
- u64 reserved1;
- u64 acc; /* Addr to analytics counter control block */
- u8 reserved2[488];
-} __packed;
-
-struct paiext_map {
- unsigned long *area; /* Area for CPU to store counters */
- struct pai_userdata *save; /* Area to store non-zero counters */
- unsigned int active_events; /* # of PAI Extension users */
- refcount_t refcnt;
- struct perf_event *event; /* Perf event for sampling */
- struct paiext_cb *paiext_cb; /* PAI extension control block area */
- struct list_head syswide_list; /* List system-wide sampling events */
-};
-
-struct paiext_mapptr {
- struct paiext_map *mapptr;
-};
-
-static struct paiext_root { /* Anchor to per CPU data */
- refcount_t refcnt; /* Overall active events */
- struct paiext_mapptr __percpu *mapptr;
-} paiext_root;
-
-/* Free per CPU data when the last event is removed. */
-static void paiext_root_free(void)
-{
- if (refcount_dec_and_test(&paiext_root.refcnt)) {
- free_percpu(paiext_root.mapptr);
- paiext_root.mapptr = NULL;
- }
- debug_sprintf_event(paiext_dbg, 5, "%s root.refcount %d\n", __func__,
- refcount_read(&paiext_root.refcnt));
-}
-
-/* On initialization of first event also allocate per CPU data dynamically.
- * Start with an array of pointers, the array size is the maximum number of
- * CPUs possible, which might be larger than the number of CPUs currently
- * online.
- */
-static int paiext_root_alloc(void)
-{
- if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
- /* The memory is already zeroed. */
- paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
- if (!paiext_root.mapptr) {
- /* Returning without refcnt adjustment is ok. The
- * error code is handled by paiext_alloc() which
- * decrements refcnt when an event can not be
- * created.
- */
- return -ENOMEM;
- }
- refcount_set(&paiext_root.refcnt, 1);
- }
- return 0;
-}
-
-/* Protects against concurrent increment of sampler and counter member
- * increments at the same time and prohibits concurrent execution of
- * counting and sampling events.
- * Ensures that analytics counter block is deallocated only when the
- * sampling and counting on that cpu is zero.
- * For details see paiext_alloc().
- */
-static DEFINE_MUTEX(paiext_reserve_mutex);
-
-/* Free all memory allocated for event counting/sampling setup */
-static void paiext_free(struct paiext_mapptr *mp)
-{
- kfree(mp->mapptr->area);
- kfree(mp->mapptr->paiext_cb);
- kvfree(mp->mapptr->save);
- kfree(mp->mapptr);
- mp->mapptr = NULL;
-}
-
-/* Release the PMU if event is the last perf event */
-static void paiext_event_destroy_cpu(struct perf_event *event, int cpu)
-{
- struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, cpu);
- struct paiext_map *cpump = mp->mapptr;
-
- mutex_lock(&paiext_reserve_mutex);
- if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
- paiext_free(mp);
- paiext_root_free();
- mutex_unlock(&paiext_reserve_mutex);
-}
-
-static void paiext_event_destroy(struct perf_event *event)
-{
- int cpu;
-
- free_page(PAI_SAVE_AREA(event));
- if (event->cpu == -1) {
- struct cpumask *mask = PAI_CPU_MASK(event);
-
- for_each_cpu(cpu, mask)
- paiext_event_destroy_cpu(event, cpu);
- kfree(mask);
- } else {
- paiext_event_destroy_cpu(event, event->cpu);
- }
- debug_sprintf_event(paiext_dbg, 4, "%s cpu %d\n", __func__,
- event->cpu);
-}
-
-/* Used to avoid races in checking concurrent access of counting and
- * sampling for pai_extension events.
- *
- * Only one instance of event pai_ext/NNPA_ALL/ for sampling is
- * allowed and when this event is running, no counting event is allowed.
- * Several counting events are allowed in parallel, but no sampling event
- * is allowed while one (or more) counting events are running.
- *
- * This function is called in process context and it is safe to block.
- * When the event initialization function fails, no other callback will
- * be invoked.
- *
- * Allocate the memory for the event.
- */
-static int paiext_alloc_cpu(struct perf_event *event, int cpu)
-{
- struct paiext_mapptr *mp;
- struct paiext_map *cpump;
- int rc;
-
- mutex_lock(&paiext_reserve_mutex);
- rc = paiext_root_alloc();
- if (rc)
- goto unlock;
-
- mp = per_cpu_ptr(paiext_root.mapptr, cpu);
- cpump = mp->mapptr;
- if (!cpump) { /* Paiext_map allocated? */
- rc = -ENOMEM;
- cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
- if (!cpump)
- goto undo;
-
- /* Allocate memory for counter area and counter extraction.
- * These are
-		 * - a 512 byte block that requires 512 byte boundary alignment.
-		 * - a 1KB block that requires 1KB boundary alignment.
- * Only the first counting event has to allocate the area.
- *
- * Note: This works with commit 59bb47985c1d by default.
- * Backporting this to kernels without this commit might
- * need adjustment.
- */
- mp->mapptr = cpump;
- cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
- cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
- cpump->save = kvmalloc_array(paiext_cnt + 1,
- sizeof(struct pai_userdata),
- GFP_KERNEL);
- if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
- paiext_free(mp);
- goto undo;
- }
- INIT_LIST_HEAD(&cpump->syswide_list);
- refcount_set(&cpump->refcnt, 1);
- rc = 0;
- } else {
- refcount_inc(&cpump->refcnt);
- }
-
-undo:
- if (rc) {
- /* Error in allocation of event, decrement anchor. Since
-		 * the event is not created, its destroy() function is never
- * invoked. Adjust the reference counter for the anchor.
- */
- paiext_root_free();
- }
-unlock:
- mutex_unlock(&paiext_reserve_mutex);
- /* If rc is non-zero, no increment of counter/sampler was done. */
- return rc;
-}
-
-static int paiext_alloc(struct perf_event *event)
-{
- struct cpumask *maskptr;
- int cpu, rc = -ENOMEM;
-
- maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
- if (!maskptr)
- goto out;
-
- for_each_online_cpu(cpu) {
- rc = paiext_alloc_cpu(event, cpu);
- if (rc) {
- for_each_cpu(cpu, maskptr)
- paiext_event_destroy_cpu(event, cpu);
- kfree(maskptr);
- goto out;
- }
- cpumask_set_cpu(cpu, maskptr);
- }
-
- /*
-	 * On error all cpumasks are freed and all events have been destroyed.
-	 * Save which CPUs the data structures have been allocated for.
-	 * Release them in the paiext_event_destroy() callback function
-	 * for this event.
- */
- PAI_CPU_MASK(event) = maskptr;
- rc = 0;
-out:
- return rc;
-}
-
-/* The PAI extension 1 control block supports up to 128 entries. Return
- * the index within PAIE1_CB given the event number. Also validate event
- * number.
- */
-static int paiext_event_valid(struct perf_event *event)
-{
- u64 cfg = event->attr.config;
-
- if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
- /* Offset NNPA in paiext_cb */
- event->hw.config_base = offsetof(struct paiext_cb, acc);
- return 0;
- }
- return -ENOENT;
-}
-
-/* Might be called on a different CPU than the one the event is intended for. */
-static int paiext_event_init(struct perf_event *event)
-{
- struct perf_event_attr *a = &event->attr;
- int rc;
-
- /* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
- if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
- return -ENOENT;
- /* PAI extension event must be valid and in supported range */
- rc = paiext_event_valid(event);
- if (rc)
- return rc;
- /* Allow only event NNPA_ALL for sampling. */
- if (a->sample_period && a->config != PAI_NNPA_BASE)
- return -EINVAL;
- /* Prohibit exclude_user event selection */
- if (a->exclude_user)
- return -EINVAL;
- /* Get a page to store last counter values for sampling */
- if (a->sample_period) {
- PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
- if (!PAI_SAVE_AREA(event))
- return -ENOMEM;
- }
-
- if (event->cpu >= 0)
- rc = paiext_alloc_cpu(event, event->cpu);
- else
- rc = paiext_alloc(event);
- if (rc) {
- free_page(PAI_SAVE_AREA(event));
- return rc;
- }
- event->destroy = paiext_event_destroy;
-
- if (a->sample_period) {
- a->sample_period = 1;
- a->freq = 0;
-		/* Register for paiext_sched_task() to be called */
- event->attach_state |= PERF_ATTACH_SCHED_CB;
- /* Add raw data which are the memory mapped counters */
- a->sample_type |= PERF_SAMPLE_RAW;
- /* Turn off inheritance */
- a->inherit = 0;
- }
-
- return 0;
-}
-
-static u64 paiext_getctr(unsigned long *area, int nr)
-{
- return area[nr];
-}
-
-/* Read the counter values. Return value from location in buffer. For event
- * NNPA_ALL sum up all events.
- */
-static u64 paiext_getdata(struct perf_event *event)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- u64 sum = 0;
- int i;
-
- if (event->attr.config != PAI_NNPA_BASE)
- return paiext_getctr(cpump->area,
- event->attr.config - PAI_NNPA_BASE);
-
- for (i = 1; i <= paiext_cnt; i++)
- sum += paiext_getctr(cpump->area, i);
-
- return sum;
-}
-
-static u64 paiext_getall(struct perf_event *event)
-{
- return paiext_getdata(event);
-}
-
-static void paiext_read(struct perf_event *event)
-{
- u64 prev, new, delta;
-
- prev = local64_read(&event->hw.prev_count);
- new = paiext_getall(event);
- local64_set(&event->hw.prev_count, new);
- delta = new - prev;
- local64_add(delta, &event->count);
-}
-
-static void paiext_start(struct perf_event *event, int flags)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- u64 sum;
-
- if (!event->attr.sample_period) { /* Counting */
- sum = paiext_getall(event); /* Get current value */
- local64_set(&event->hw.prev_count, sum);
- } else { /* Sampling */
- memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
- PAIE1_CTRBLOCK_SZ);
- /* Enable context switch callback for system-wide sampling */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
- perf_sched_cb_inc(event->pmu);
- } else {
- cpump->event = event;
- }
- }
-}
-
-static int paiext_add(struct perf_event *event, int flags)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- struct paiext_cb *pcb = cpump->paiext_cb;
-
- if (++cpump->active_events == 1) {
- get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb);
- pcb->acc = virt_to_phys(cpump->area) | 0x1;
- /* Enable CPU instruction lookup for PAIE1 control block */
- local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
- }
- if (flags & PERF_EF_START)
- paiext_start(event, PERF_EF_RELOAD);
- event->hw.state = 0;
- return 0;
-}
-
-static void paiext_have_sample(struct perf_event *, struct paiext_map *);
-static void paiext_stop(struct perf_event *event, int flags)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
-
- if (!event->attr.sample_period) { /* Counting */
- paiext_read(event);
- } else { /* Sampling */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- list_del(PAI_SWLIST(event));
- perf_sched_cb_dec(event->pmu);
- } else {
- paiext_have_sample(event, cpump);
- cpump->event = NULL;
- }
- }
- event->hw.state = PERF_HES_STOPPED;
-}
-
-static void paiext_del(struct perf_event *event, int flags)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- struct paiext_cb *pcb = cpump->paiext_cb;
-
- paiext_stop(event, PERF_EF_UPDATE);
- if (--cpump->active_events == 0) {
- /* Disable CPU instruction lookup for PAIE1 control block */
- local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
- pcb->acc = 0;
- get_lowcore()->aicd = 0;
- }
-}
-
-/* Create raw data and save it in buffer. Returns number of bytes copied.
- * Saves only positive counter entries of the form
- * 2 bytes: Number of counter
- * 8 bytes: Value of counter
- */
-static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area,
- unsigned long *area_old)
-{
- int i, outidx = 0;
-
- for (i = 1; i <= paiext_cnt; i++) {
- u64 val = paiext_getctr(area, i);
- u64 val_old = paiext_getctr(area_old, i);
-
- if (val >= val_old)
- val -= val_old;
- else
- val = (~0ULL - val_old) + val + 1;
- if (val) {
- userdata[outidx].num = i;
- userdata[outidx].value = val;
- outidx++;
- }
- }
- return outidx * sizeof(*userdata);
-}
-
-/* Write sample when one or more counters values are nonzero.
- *
- * Note: The functions paiext_sched_task() and paiext_push_sample() are not
- * invoked after function paiext_del() has been called because of function
- * perf_sched_cb_dec().
- * The functions paiext_sched_task() and paiext_push_sample() are only
- * called when sampling is active. Function perf_sched_cb_inc()
- * has been invoked to install function paiext_sched_task() as call back
- * to run at context switch time (see paiext_add()).
- *
- * This causes function perf_event_context_sched_out() and
- * perf_event_context_sched_in() to check whether the PMU has installed a
- * sched_task() callback. That callback is not active after paiext_del()
- * returns and has deleted the event on that CPU.
- */
-static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
- struct perf_event *event)
-{
- struct perf_sample_data data;
- struct perf_raw_record raw;
- struct pt_regs regs;
- int overflow;
-
- /* Setup perf sample */
- memset(&regs, 0, sizeof(regs));
- memset(&raw, 0, sizeof(raw));
- memset(&data, 0, sizeof(data));
- perf_sample_data_init(&data, 0, event->hw.last_period);
- if (event->attr.sample_type & PERF_SAMPLE_TID) {
- data.tid_entry.pid = task_tgid_nr(current);
- data.tid_entry.tid = task_pid_nr(current);
- }
- if (event->attr.sample_type & PERF_SAMPLE_TIME)
- data.time = event->clock();
- if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
- data.id = event->id;
- if (event->attr.sample_type & PERF_SAMPLE_CPU)
- data.cpu_entry.cpu = smp_processor_id();
- if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- raw.frag.size = rawsize;
- raw.frag.data = cpump->save;
- perf_sample_save_raw_data(&data, event, &raw);
- }
-
- overflow = perf_event_overflow(event, &data, &regs);
- perf_event_update_userpage(event);
- /* Save NNPA lowcore area after read in event */
- memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
- PAIE1_CTRBLOCK_SZ);
- return overflow;
-}
-
-/* Check if there is data to be saved on schedule out of a task. */
-static void paiext_have_sample(struct perf_event *event,
- struct paiext_map *cpump)
-{
- size_t rawsize;
-
- if (!event)
- return;
- rawsize = paiext_copy(cpump->save, cpump->area,
- (unsigned long *)PAI_SAVE_AREA(event));
- if (rawsize) /* Incremented counters */
- paiext_push_sample(rawsize, cpump, event);
-}
-
-/* Check if there is data to be saved on schedule out of a task. */
-static void paiext_have_samples(void)
-{
- struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
- struct paiext_map *cpump = mp->mapptr;
- struct perf_event *event;
-
- list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
- paiext_have_sample(event, cpump);
-}
-
-/* Called on schedule-in and schedule-out. No access to event structure,
- * but for sampling only event NNPA_ALL is allowed.
- */
-static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
- struct task_struct *task, bool sched_in)
-{
- /* We started with a clean page on event installation. So read out
- * results on schedule_out and if page was dirty, save old values.
- */
- if (!sched_in)
- paiext_have_samples();
-}
-
-/* Attribute definitions for pai extension1 interface. As with other CPU
- * Measurement Facilities, there is one attribute per mapped counter.
- * The number of mapped counters may vary per machine generation. Use
- * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
- * to determine the number of mapped counters. The instruction returns
- * a positive number, which is the highest number of supported counters.
- * All counters less than this number are also supported; there are no
- * holes. A returned number of zero means no support for mapped counters.
- *
- * The identification of the counter is a unique number. The chosen range
- * is 0x1800 + offset in mapped kernel page.
- * All CPU Measurement Facility counters identifiers must be unique and
- * the numbers from 0 to 496 are already used for the CPU Measurement
- * Counter facility. Number 0x1000 to 0x103e are used for PAI cryptography
- * counters.
- * Numbers 0xb0000, 0xbc000 and 0xbd000 are already
- * used for the CPU Measurement Sampling facility.
- */
-PMU_FORMAT_ATTR(event, "config:0-63");
-
-static struct attribute *paiext_format_attr[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group paiext_events_group = {
- .name = "events",
- .attrs = NULL, /* Filled in attr_event_init() */
-};
-
-static struct attribute_group paiext_format_group = {
- .name = "format",
- .attrs = paiext_format_attr,
-};
-
-static const struct attribute_group *paiext_attr_groups[] = {
- &paiext_events_group,
- &paiext_format_group,
- NULL,
-};
-
-/* Performance monitoring unit for mapped counters */
-static struct pmu paiext = {
- .task_ctx_nr = perf_hw_context,
- .event_init = paiext_event_init,
- .add = paiext_add,
- .del = paiext_del,
- .start = paiext_start,
- .stop = paiext_stop,
- .read = paiext_read,
- .sched_task = paiext_sched_task,
- .attr_groups = paiext_attr_groups,
-};
-
-/* List of symbolic PAI extension 1 NNPA counter names. */
-static const char * const paiext_ctrnames[] = {
- [0] = "NNPA_ALL",
- [1] = "NNPA_ADD",
- [2] = "NNPA_SUB",
- [3] = "NNPA_MUL",
- [4] = "NNPA_DIV",
- [5] = "NNPA_MIN",
- [6] = "NNPA_MAX",
- [7] = "NNPA_LOG",
- [8] = "NNPA_EXP",
- [9] = "NNPA_IBM_RESERVED_9",
- [10] = "NNPA_RELU",
- [11] = "NNPA_TANH",
- [12] = "NNPA_SIGMOID",
- [13] = "NNPA_SOFTMAX",
- [14] = "NNPA_BATCHNORM",
- [15] = "NNPA_MAXPOOL2D",
- [16] = "NNPA_AVGPOOL2D",
- [17] = "NNPA_LSTMACT",
- [18] = "NNPA_GRUACT",
- [19] = "NNPA_CONVOLUTION",
- [20] = "NNPA_MATMUL_OP",
- [21] = "NNPA_MATMUL_OP_BCAST23",
- [22] = "NNPA_SMALLBATCH",
- [23] = "NNPA_LARGEDIM",
- [24] = "NNPA_SMALLTENSOR",
- [25] = "NNPA_1MFRAME",
- [26] = "NNPA_2GFRAME",
- [27] = "NNPA_ACCESSEXCEPT",
- [28] = "NNPA_TRANSFORM",
- [29] = "NNPA_GELU",
- [30] = "NNPA_MOMENTS",
- [31] = "NNPA_LAYERNORM",
- [32] = "NNPA_MATMUL_OP_BCAST1",
- [33] = "NNPA_SQRT",
- [34] = "NNPA_INVSQRT",
- [35] = "NNPA_NORM",
- [36] = "NNPA_REDUCE",
-};
-
-static void __init attr_event_free(struct attribute **attrs, int num)
-{
- struct perf_pmu_events_attr *pa;
- struct device_attribute *dap;
- int i;
-
- for (i = 0; i < num; i++) {
- dap = container_of(attrs[i], struct device_attribute, attr);
- pa = container_of(dap, struct perf_pmu_events_attr, attr);
- kfree(pa);
- }
- kfree(attrs);
-}
-
-static int __init attr_event_init_one(struct attribute **attrs, int num)
-{
- struct perf_pmu_events_attr *pa;
-
- /* Index larger than array_size, no counter name available */
- if (num >= ARRAY_SIZE(paiext_ctrnames)) {
- attrs[num] = NULL;
- return 0;
- }
-
- pa = kzalloc(sizeof(*pa), GFP_KERNEL);
- if (!pa)
- return -ENOMEM;
-
- sysfs_attr_init(&pa->attr.attr);
- pa->id = PAI_NNPA_BASE + num;
- pa->attr.attr.name = paiext_ctrnames[num];
- pa->attr.attr.mode = 0444;
- pa->attr.show = cpumf_events_sysfs_show;
- pa->attr.store = NULL;
- attrs[num] = &pa->attr.attr;
- return 0;
-}
-
-/* Create PMU sysfs event attributes on the fly. */
-static int __init attr_event_init(void)
-{
- struct attribute **attrs;
- int ret, i;
-
- attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL);
- if (!attrs)
- return -ENOMEM;
- for (i = 0; i <= paiext_cnt; i++) {
- ret = attr_event_init_one(attrs, i);
- if (ret) {
- attr_event_free(attrs, i);
- return ret;
- }
- }
- attrs[i] = NULL;
- paiext_events_group.attrs = attrs;
- return 0;
-}
-
-static int __init paiext_init(void)
-{
- struct qpaci_info_block ib;
- int rc = -ENOMEM;
-
- if (!test_facility(197))
- return 0;
-
- qpaci(&ib);
- paiext_cnt = ib.num_nnpa;
- if (paiext_cnt >= PAI_NNPA_MAXCTR)
- paiext_cnt = PAI_NNPA_MAXCTR;
- if (!paiext_cnt)
- return 0;
-
- rc = attr_event_init();
- if (rc) {
- pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
- return rc;
- }
-
- /* Setup s390dbf facility */
- paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
- if (!paiext_dbg) {
- pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
- rc = -ENOMEM;
- goto out_init;
- }
- debug_register_view(paiext_dbg, &debug_sprintf_view);
-
- rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
- if (rc) {
- pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
- "rc=%i\n", rc);
- goto out_pmu;
- }
-
- return 0;
-
-out_pmu:
- debug_unregister_view(paiext_dbg, &debug_sprintf_view);
- debug_unregister(paiext_dbg);
-out_init:
- attr_event_free(paiext_events_group.attrs,
- ARRAY_SIZE(paiext_ctrnames) + 1);
- return rc;
-}
-
-device_initcall(paiext_init);
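
The raw sample payload emitted by this driver is simply the array of packed pai_userdata records built by paiext_copy() above: a 2-byte counter number followed by an 8-byte delta, non-zero entries only. A minimal consumer sketch, assuming only that record layout (and native s390 byte order, since producer and consumer run on the same machine):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Mirrors the packed record written by paiext_copy():
 * 2-byte counter number followed by an 8-byte non-zero delta.
 */
struct pai_userdata {
	uint16_t num;
	uint64_t value;
} __attribute__((packed));

/* Walk a PERF_SAMPLE_RAW payload of rawsize bytes and print each
 * (counter number, delta) pair. rawsize is a multiple of the record
 * size, as guaranteed by the producer.
 */
static void decode_pai_raw(const void *raw, size_t rawsize)
{
	const struct pai_userdata *rec = raw;
	size_t i;

	for (i = 0; i < rawsize / sizeof(*rec); i++)
		printf("counter %u delta %llu\n", (unsigned int)rec[i].num,
		       (unsigned long long)rec[i].value);
}

int main(void)
{
	/* One hand-built record, purely for illustration. */
	struct pai_userdata demo = { .num = 1, .value = 42 };

	decode_pai_raw(&demo, sizeof(demo));
	return 0;
}
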
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index a6b058ee4a36..7b305f1456f8 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -44,9 +44,6 @@ int perf_reg_validate(u64 mask)
u64 perf_reg_abi(struct task_struct *task)
{
- if (test_tsk_thread_flag(task, TIF_31BIT))
- return PERF_SAMPLE_REGS_ABI_32;
-
return PERF_SAMPLE_REGS_ABI_64;
}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index b107dbca4ed7..0df95dcb2101 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -24,7 +24,6 @@
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
-#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/init_task.h>
@@ -166,12 +165,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
- if (is_compat_task()) {
- p->thread.acrs[0] = (unsigned int)tls;
- } else {
- p->thread.acrs[0] = (unsigned int)(tls >> 32);
- p->thread.acrs[1] = (unsigned int)tls;
- }
+ p->thread.acrs[0] = (unsigned int)(tls >> 32);
+ p->thread.acrs[1] = (unsigned int)tls;
}
/*
* s390 stores the svc return address in arch_data when calling
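
Since the 31-bit case is gone, CLONE_SETTLS always splits the 64-bit TLS value across two 32-bit access registers, high word in acrs[0] and low word in acrs[1]. A trivial sketch of the inverse (assuming a 64-bit unsigned long, as on s390x), for illustration only:

/* Reassemble the 64-bit TLS pointer from the two access registers,
 * mirroring the split done in copy_thread() above.
 */
static inline unsigned long tls_from_acrs(const unsigned int *acrs)
{
	return ((unsigned long)acrs[0] << 32) | acrs[1];
}
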
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 11f70c1e2797..e33a3eccda56 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -4,8 +4,7 @@
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
-#define KMSG_COMPONENT "cpu"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpu: " fmt
#include <linux/stop_machine.h>
#include <linux/cpufeature.h>
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 494216c4b4f3..ceaa1726e328 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -22,7 +22,6 @@
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/seccomp.h>
-#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/guarded_storage.h>
#include <asm/access-regs.h>
@@ -38,10 +37,6 @@
#include "entry.h"
-#ifdef CONFIG_COMPAT
-#include "compat_ptrace.h"
-#endif
-
void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
@@ -507,308 +502,6 @@ long arch_ptrace(struct task_struct *child, long request,
}
}
-#ifdef CONFIG_COMPAT
-/*
- * Now the fun part starts... a 31 bit program running in the
- * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
- * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
- * to handle, the difference to the 64 bit versions of the requests
- * is that the access is done in multiples of 4 byte instead of
- * 8 bytes (sizeof(unsigned long) on 31/64 bit).
- * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
- * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
- * is a 31 bit program too, the content of struct user can be
- * emulated. A 31 bit program peeking into the struct user of
- * a 64 bit program is a no-no.
- */
-
-/*
- * Same as peek_user_per but for a 31 bit program.
- */
-static inline __u32 __peek_user_per_compat(struct task_struct *child,
- addr_t addr)
-{
- if (addr == offsetof(struct compat_per_struct_kernel, cr9))
- /* Control bits of the active per set. */
- return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
- PER_EVENT_IFETCH : child->thread.per_user.control;
- else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
- /* Start address of the active per set. */
- return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
- 0 : child->thread.per_user.start;
- else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
- /* End address of the active per set. */
- return test_thread_flag(TIF_SINGLE_STEP) ?
- PSW32_ADDR_INSN : child->thread.per_user.end;
- else if (addr == offsetof(struct compat_per_struct_kernel, bits))
- /* Single-step bit. */
- return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
- 0x80000000 : 0;
- else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
- /* Start address of the user specified per set. */
- return (__u32) child->thread.per_user.start;
- else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
- /* End address of the user specified per set. */
- return (__u32) child->thread.per_user.end;
- else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
- /* PER code, ATMID and AI of the last PER trap */
- return (__u32) child->thread.per_event.cause << 16;
- else if (addr == offsetof(struct compat_per_struct_kernel, address))
- /* Address of the last PER trap */
- return (__u32) child->thread.per_event.address;
- else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
- /* Access id of the last PER trap */
- return (__u32) child->thread.per_event.paid << 24;
- return 0;
-}
-
-/*
- * Same as peek_user but for a 31 bit program.
- */
-static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
-{
- addr_t offset;
- __u32 tmp;
-
- if (addr < offsetof(struct compat_user, regs.acrs)) {
- struct pt_regs *regs = task_pt_regs(child);
- /*
- * psw and gprs are stored on the stack
- */
- if (addr == offsetof(struct compat_user, regs.psw.mask)) {
- /* Fake a 31 bit psw mask. */
- tmp = (__u32)(regs->psw.mask >> 32);
- tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
- tmp |= PSW32_USER_BITS;
- } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
- /* Fake a 31 bit psw address. */
- tmp = (__u32) regs->psw.addr |
- (__u32)(regs->psw.mask & PSW_MASK_BA);
- } else {
- /* gpr 0-15 */
- tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
- }
- } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
- /*
- * access registers are stored in the thread structure
- */
- offset = addr - offsetof(struct compat_user, regs.acrs);
- tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
-
- } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
- /*
- * orig_gpr2 is stored on the kernel stack
- */
- tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
-
- } else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
- /*
- * prevent reads of padding hole between
- * orig_gpr2 and fp_regs on s390.
- */
- tmp = 0;
-
- } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
- /*
- * floating point control reg. is in the thread structure
- */
- tmp = child->thread.ufpu.fpc;
-
- } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
- /*
- * floating point regs. are in the child->thread.ufpu.vxrs array
- */
- offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
- tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
- } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
- /*
- * Handle access to the per_info structure.
- */
- addr -= offsetof(struct compat_user, regs.per_info);
- tmp = __peek_user_per_compat(child, addr);
-
- } else
- tmp = 0;
-
- return tmp;
-}
-
-static int peek_user_compat(struct task_struct *child,
- addr_t addr, addr_t data)
-{
- __u32 tmp;
-
- if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
- return -EIO;
-
- tmp = __peek_user_compat(child, addr);
- return put_user(tmp, (__u32 __user *) data);
-}
-
-/*
- * Same as poke_user_per but for a 31 bit program.
- */
-static inline void __poke_user_per_compat(struct task_struct *child,
- addr_t addr, __u32 data)
-{
- if (addr == offsetof(struct compat_per_struct_kernel, cr9))
- /* PER event mask of the user specified per set. */
- child->thread.per_user.control =
- data & (PER_EVENT_MASK | PER_CONTROL_MASK);
- else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
- /* Starting address of the user specified per set. */
- child->thread.per_user.start = data;
- else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
- /* Ending address of the user specified per set. */
- child->thread.per_user.end = data;
-}
-
-/*
- * Same as poke_user but for a 31 bit program.
- */
-static int __poke_user_compat(struct task_struct *child,
- addr_t addr, addr_t data)
-{
- __u32 tmp = (__u32) data;
- addr_t offset;
-
- if (addr < offsetof(struct compat_user, regs.acrs)) {
- struct pt_regs *regs = task_pt_regs(child);
- /*
- * psw, gprs, acrs and orig_gpr2 are stored on the stack
- */
- if (addr == offsetof(struct compat_user, regs.psw.mask)) {
- __u32 mask = PSW32_MASK_USER;
-
- mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
- /* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp ^ PSW32_USER_BITS) & ~mask)
- /* Invalid psw mask. */
- return -EINVAL;
- if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
- /* Invalid address-space-control bits */
- return -EINVAL;
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
- (regs->psw.mask & PSW_MASK_BA) |
- (__u64)(tmp & mask) << 32;
- } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
- /* Build a 64 bit psw address from 31 bit address. */
- regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
- /* Transfer 31 bit amode bit to psw mask. */
- regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
- (__u64)(tmp & PSW32_ADDR_AMODE);
- } else {
- if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
- addr == offsetof(struct compat_user, regs.gprs[2])) {
- struct pt_regs *regs = task_pt_regs(child);
-
- regs->int_code = 0x20000 | (data & 0xffff);
- }
- /* gpr 0-15 */
- *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
- }
- } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
- /*
- * access registers are stored in the thread structure
- */
- offset = addr - offsetof(struct compat_user, regs.acrs);
- *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
-
- } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
- /*
- * orig_gpr2 is stored on the kernel stack
- */
- *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
-
- } else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
- /*
-		 * prevent writes of padding hole between
- * orig_gpr2 and fp_regs on s390.
- */
- return 0;
-
- } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
- /*
- * floating point control reg. is in the thread structure
- */
- child->thread.ufpu.fpc = data;
-
- } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
- /*
- * floating point regs. are in the child->thread.ufpu.vxrs array
- */
- offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
- *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp;
- } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
- /*
- * Handle access to the per_info structure.
- */
- addr -= offsetof(struct compat_user, regs.per_info);
- __poke_user_per_compat(child, addr, data);
- }
-
- return 0;
-}
-
-static int poke_user_compat(struct task_struct *child,
- addr_t addr, addr_t data)
-{
- if (!is_compat_task() || (addr & 3) ||
- addr > sizeof(struct compat_user) - 3)
- return -EIO;
-
- return __poke_user_compat(child, addr, data);
-}
-
-long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
- compat_ulong_t caddr, compat_ulong_t cdata)
-{
- unsigned long addr = caddr;
- unsigned long data = cdata;
- compat_ptrace_area parea;
- int copied, ret;
-
- switch (request) {
- case PTRACE_PEEKUSR:
- /* read the word at location addr in the USER area. */
- return peek_user_compat(child, addr, data);
-
- case PTRACE_POKEUSR:
- /* write the word at location addr in the USER area */
- return poke_user_compat(child, addr, data);
-
- case PTRACE_PEEKUSR_AREA:
- case PTRACE_POKEUSR_AREA:
- if (copy_from_user(&parea, (void __force __user *) addr,
- sizeof(parea)))
- return -EFAULT;
- addr = parea.kernel_addr;
- data = parea.process_addr;
- copied = 0;
- while (copied < parea.len) {
- if (request == PTRACE_PEEKUSR_AREA)
- ret = peek_user_compat(child, addr, data);
- else {
- __u32 utmp;
- if (get_user(utmp,
- (__u32 __force __user *) data))
- return -EFAULT;
- ret = poke_user_compat(child, addr, utmp);
- }
- if (ret)
- return ret;
- addr += sizeof(unsigned int);
- data += sizeof(unsigned int);
- copied += sizeof(unsigned int);
- }
- return 0;
- case PTRACE_GET_LAST_BREAK:
- return put_user(child->thread.last_break, (unsigned int __user *)data);
- }
- return compat_ptrace_request(child, request, addr, data);
-}
-#endif
-
/*
* user_regset definitions.
*/
@@ -1297,225 +990,8 @@ static const struct user_regset_view user_s390_view = {
.n = ARRAY_SIZE(s390_regsets)
};
-#ifdef CONFIG_COMPAT
-static int s390_compat_regs_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- unsigned n;
-
- if (target == current)
- save_access_regs(target->thread.acrs);
-
- for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
- membuf_store(&to, __peek_user_compat(target, n));
- return 0;
-}
-
-static int s390_compat_regs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int rc = 0;
-
- if (target == current)
- save_access_regs(target->thread.acrs);
-
- if (kbuf) {
- const compat_ulong_t *k = kbuf;
- while (count > 0 && !rc) {
- rc = __poke_user_compat(target, pos, *k++);
- count -= sizeof(*k);
- pos += sizeof(*k);
- }
- } else {
- const compat_ulong_t __user *u = ubuf;
- while (count > 0 && !rc) {
- compat_ulong_t word;
- rc = __get_user(word, u++);
- if (rc)
- break;
- rc = __poke_user_compat(target, pos, word);
- count -= sizeof(*u);
- pos += sizeof(*u);
- }
- }
-
- if (rc == 0 && target == current)
- restore_access_regs(target->thread.acrs);
-
- return rc;
-}
-
-static int s390_compat_regs_high_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- compat_ulong_t *gprs_high;
- int i;
-
- gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
- for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
- membuf_store(&to, *gprs_high);
- return 0;
-}
-
-static int s390_compat_regs_high_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- compat_ulong_t *gprs_high;
- int rc = 0;
-
- gprs_high = (compat_ulong_t *)
- &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
- if (kbuf) {
- const compat_ulong_t *k = kbuf;
- while (count > 0) {
- *gprs_high = *k++;
- *gprs_high += 2;
- count -= sizeof(*k);
- }
- } else {
- const compat_ulong_t __user *u = ubuf;
- while (count > 0 && !rc) {
- unsigned long word;
- rc = __get_user(word, u++);
- if (rc)
- break;
- *gprs_high = word;
- *gprs_high += 2;
- count -= sizeof(*u);
- }
- }
-
- return rc;
-}
-
-static int s390_compat_last_break_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- compat_ulong_t last_break = target->thread.last_break;
-
- return membuf_store(&to, (unsigned long)last_break);
-}
-
-static int s390_compat_last_break_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return 0;
-}
-
-static const struct user_regset s390_compat_regsets[] = {
- {
- USER_REGSET_NOTE_TYPE(PRSTATUS),
- .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
- .size = sizeof(compat_long_t),
- .align = sizeof(compat_long_t),
- .regset_get = s390_compat_regs_get,
- .set = s390_compat_regs_set,
- },
- {
- USER_REGSET_NOTE_TYPE(PRFPREG),
- .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
- .size = sizeof(compat_long_t),
- .align = sizeof(compat_long_t),
- .regset_get = s390_fpregs_get,
- .set = s390_fpregs_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_SYSTEM_CALL),
- .n = 1,
- .size = sizeof(compat_uint_t),
- .align = sizeof(compat_uint_t),
- .regset_get = s390_system_call_get,
- .set = s390_system_call_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_LAST_BREAK),
- .n = 1,
- .size = sizeof(long),
- .align = sizeof(long),
- .regset_get = s390_compat_last_break_get,
- .set = s390_compat_last_break_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_TDB),
- .n = 1,
- .size = 256,
- .align = 1,
- .regset_get = s390_tdb_get,
- .set = s390_tdb_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_VXRS_LOW),
- .n = __NUM_VXRS_LOW,
- .size = sizeof(__u64),
- .align = sizeof(__u64),
- .regset_get = s390_vxrs_low_get,
- .set = s390_vxrs_low_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_VXRS_HIGH),
- .n = __NUM_VXRS_HIGH,
- .size = sizeof(__vector128),
- .align = sizeof(__vector128),
- .regset_get = s390_vxrs_high_get,
- .set = s390_vxrs_high_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_HIGH_GPRS),
- .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
- .size = sizeof(compat_long_t),
- .align = sizeof(compat_long_t),
- .regset_get = s390_compat_regs_high_get,
- .set = s390_compat_regs_high_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_GS_CB),
- .n = sizeof(struct gs_cb) / sizeof(__u64),
- .size = sizeof(__u64),
- .align = sizeof(__u64),
- .regset_get = s390_gs_cb_get,
- .set = s390_gs_cb_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_GS_BC),
- .n = sizeof(struct gs_cb) / sizeof(__u64),
- .size = sizeof(__u64),
- .align = sizeof(__u64),
- .regset_get = s390_gs_bc_get,
- .set = s390_gs_bc_set,
- },
- {
- USER_REGSET_NOTE_TYPE(S390_RI_CB),
- .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
- .size = sizeof(__u64),
- .align = sizeof(__u64),
- .regset_get = s390_runtime_instr_get,
- .set = s390_runtime_instr_set,
- },
-};
-
-static const struct user_regset_view user_s390_compat_view = {
- .name = "s390",
- .e_machine = EM_S390,
- .regsets = s390_compat_regsets,
- .n = ARRAY_SIZE(s390_compat_regsets)
-};
-#endif
-
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
-#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(task, TIF_31BIT))
- return &user_s390_compat_view;
-#endif
return &user_s390_view;
}
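
With the compat regset view removed, a 64-bit tracer obtains the general registers through the generic PTRACE_GETREGSET interface and the remaining NT_PRSTATUS regset. A hedged user-space sketch: the type and header names follow the s390 uapi (asm/ptrace.h) and may need adjustment, and the tracee is assumed to be ptrace-stopped already.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>
#include <asm/ptrace.h>		/* s390_regs, psw_t */
#include <stdio.h>

/* Read the tracee's PSW and general registers via NT_PRSTATUS. */
static int dump_gprs(pid_t pid)
{
	s390_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	printf("psw addr %016lx gpr2 %016lx\n", regs.psw.addr, regs.gprs[2]);
	return 0;
}
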
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 892fce2b7549..c1fe0b53c5ac 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -13,8 +13,7 @@
* This file handles the architecture-dependent parts of initialization
*/
-#define KMSG_COMPONENT "setup"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "setup: " fmt
#include <linux/errno.h>
#include <linux/export.h>
@@ -47,7 +46,6 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
-#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>
@@ -112,7 +110,7 @@ struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amod
* Because the AMODE31 sections are relocated below 2G at startup,
* the content of control registers CR2, CR5 and CR15 must be updated
* with new addresses after the relocation. The initial initialization of
- * control registers occurs in head64.S and then gets updated again after AMODE31
+ * control registers occurs in head.S and then gets updated again after AMODE31
* relocation. We must access the relevant AMODE31 tables indirectly via
* pointers placed in the .amode31.refs linker section. Those pointers get
* updated automatically during AMODE31 relocation and always contain a valid
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index e48013cd832c..4874de5edea0 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -27,7 +27,6 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
-#include <linux/compat.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/vdso-symbols.h>
@@ -290,12 +289,6 @@ static int setup_frame(int sig, struct k_sigaction *ka,
unsigned long restorer;
size_t frame_size;
- /*
- * gprs_high are only present for a 31-bit task running on
- * a 64-bit kernel (see compat_signal.c) but the space for
- * gprs_high need to be allocated if vector registers are
- * included in the signal frame on a 31-bit system.
- */
frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
if (cpu_has_vx())
frame_size += sizeof(frame->sregs_ext);
@@ -333,7 +326,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ka->sa.sa_restorer;
else
- restorer = VDSO64_SYMBOL(current, sigreturn);
+ restorer = VDSO_SYMBOL(current, sigreturn);
/* Set up registers for signal handler */
regs->gprs[14] = restorer;
@@ -367,12 +360,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
size_t frame_size;
frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
- /*
- * gprs_high are only present for a 31-bit task running on
- * a 64-bit kernel (see compat_signal.c) but the space for
- * gprs_high need to be allocated if vector registers are
- * included in the signal frame on a 31-bit system.
- */
uc_flags = 0;
if (cpu_has_vx()) {
frame_size += sizeof(_sigregs_ext);
@@ -391,7 +378,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = (unsigned long) ksig->ka.sa.sa_restorer;
else
- restorer = VDSO64_SYMBOL(current, rt_sigreturn);
+ restorer = VDSO_SYMBOL(current, rt_sigreturn);
/* Create siginfo on the signal stack */
if (copy_siginfo_to_user(&frame->info, &ksig->info))
@@ -490,10 +477,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs)
clear_pt_regs_flag(regs, PIF_SYSCALL);
rseq_signal_deliver(&ksig, regs);
- if (is_compat_task())
- handle_signal32(&ksig, oldset, regs);
- else
- handle_signal(&ksig, oldset, regs);
+ handle_signal(&ksig, oldset, regs);
return;
}
@@ -506,10 +490,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs)
/* Restart with sys_restart_syscall */
regs->gprs[2] = regs->orig_gpr2;
current->restart_block.arch_data = regs->psw.addr;
- if (is_compat_task())
- regs->psw.addr = VDSO32_SYMBOL(current, restart_syscall);
- else
- regs->psw.addr = VDSO64_SYMBOL(current, restart_syscall);
+ regs->psw.addr = VDSO_SYMBOL(current, restart_syscall);
if (test_thread_flag(TIF_SINGLE_STEP))
clear_thread_flag(TIF_PER_TRAP);
break;
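
Nothing changes for user space here: when a handler is installed without SA_RESTORER, the kernel still points gpr 14 at the vDSO sigreturn trampoline, now selected via the unified VDSO_SYMBOL(). A plain POSIX sketch of that ordinary case, for illustration:

#include <signal.h>
#include <string.h>

static void on_usr1(int sig)
{
	(void)sig;
}

int main(void)
{
	struct sigaction sa;

	/* No SA_RESTORER: the kernel supplies the vDSO sigreturn trampoline. */
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		return 1;
	raise(SIGUSR1);
	return 0;
}
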
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index da84c0dc6b7e..b7429f30afc1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -15,8 +15,7 @@
* operates on physical cpu numbers needs to go into smp.c.
*/
-#define KMSG_COMPONENT "cpu"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpu: " fmt
#include <linux/cpufeature.h>
#include <linux/workqueue.h>
@@ -281,6 +280,9 @@ static void pcpu_attach_task(int cpu, struct task_struct *tsk)
lc->hardirq_timer = tsk->thread.hardirq_timer;
lc->softirq_timer = tsk->thread.softirq_timer;
lc->steal_timer = 0;
+#ifdef CONFIG_STACKPROTECTOR
+ lc->stack_canary = tsk->stack_canary;
+#endif
}
static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
@@ -305,9 +307,9 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
func(data); /* should not return */
}
-static void pcpu_delegate(struct pcpu *pcpu, int cpu,
- pcpu_delegate_fn *func,
- void *data, unsigned long stack)
+static void __noreturn pcpu_delegate(struct pcpu *pcpu, int cpu,
+ pcpu_delegate_fn *func,
+ void *data, unsigned long stack)
{
struct lowcore *lc, *abs_lc;
unsigned int source_cpu;
@@ -370,7 +372,7 @@ static int pcpu_set_smt(unsigned int mtid)
/*
* Call function on the ipl CPU.
*/
-void smp_call_ipl_cpu(void (*func)(void *), void *data)
+void __noreturn smp_call_ipl_cpu(void (*func)(void *), void *data)
{
struct lowcore *lc = lowcore_ptr[0];
@@ -697,6 +699,7 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
continue;
info->core[info->configured].core_id =
address >> smp_cpu_mt_shift;
+ info->core[info->configured].type = boot_core_type;
info->configured++;
}
info->combined = info->configured;
diff --git a/arch/s390/kernel/stackprotector.c b/arch/s390/kernel/stackprotector.c
new file mode 100644
index 000000000000..d4e40483f008
--- /dev/null
+++ b/arch/s390/kernel/stackprotector.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) "stackprot: " fmt
+#endif
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <linux/printk.h>
+#include <asm/abs_lowcore.h>
+#include <asm/sections.h>
+#include <asm/machine.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch-stackprotector.h>
+
+#ifdef __DECOMPRESSOR
+
+#define DEBUGP boot_debug
+#define EMERGP boot_emerg
+#define PANIC boot_panic
+
+#else /* __DECOMPRESSOR */
+
+#define DEBUGP pr_debug
+#define EMERGP pr_emerg
+#define PANIC panic
+
+#endif /* __DECOMPRESSOR */
+
+int __bootdata_preserved(stack_protector_debug);
+
+unsigned long __stack_chk_guard;
+EXPORT_SYMBOL(__stack_chk_guard);
+
+struct insn_ril {
+ u8 opc1 : 8;
+ u8 r1 : 4;
+ u8 opc2 : 4;
+ u32 imm;
+} __packed;
+
+/*
+ * Convert a virtual instruction address to a real instruction address. The
+ * decompressor needs to patch instructions within the kernel image based on
+ * their virtual addresses, while dynamic address translation is still
+ * disabled. Therefore a translation from virtual kernel image addresses to
+ * the corresponding physical addresses is required.
+ *
+ * After dynamic address translation is enabled and when the kernel needs to
+ * patch instructions, such a translation is not required since the addresses
+ * are identical.
+ */
+static struct insn_ril *vaddress_to_insn(unsigned long vaddress)
+{
+#ifdef __DECOMPRESSOR
+ return (struct insn_ril *)__kernel_pa(vaddress);
+#else
+ return (struct insn_ril *)vaddress;
+#endif
+}
+
+static unsigned long insn_to_vaddress(struct insn_ril *insn)
+{
+#ifdef __DECOMPRESSOR
+ return (unsigned long)__kernel_va(insn);
+#else
+ return (unsigned long)insn;
+#endif
+}
+
+#define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1)
+
+static void insn_ril_to_string(char *str, struct insn_ril *insn)
+{
+ u8 *ptr = (u8 *)insn;
+ int i;
+
+ for (i = 0; i < sizeof(*insn); i++)
+ hex_byte_pack(&str[2 * i], ptr[i]);
+ str[2 * i] = 0;
+}
+
+static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new)
+{
+ char ostr[INSN_RIL_STRING_SIZE];
+ char nstr[INSN_RIL_STRING_SIZE];
+
+ insn_ril_to_string(ostr, old);
+ insn_ril_to_string(nstr, new);
+ DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr);
+}
+
+static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start)
+{
+ char istr[INSN_RIL_STRING_SIZE];
+ unsigned long vaddress, offset;
+
+ /* larl */
+ if (insn->opc1 == 0xc0 && insn->opc2 == 0x0)
+ return 0;
+ /* lgrl */
+ if (insn->opc1 == 0xc4 && insn->opc2 == 0x8)
+ return 0;
+ insn_ril_to_string(istr, insn);
+ vaddress = insn_to_vaddress(insn);
+ if (__is_defined(__DECOMPRESSOR)) {
+ offset = (unsigned long)insn - kernel_start + TEXT_OFFSET;
+ EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr);
+ PANIC("Stackprotector error\n");
+ } else {
+ EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr);
+ }
+ return -EINVAL;
+}
+
+int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start)
+{
+ unsigned long canary, *loc;
+ struct insn_ril *insn, new;
+ int rc;
+
+ /*
+ * Convert LARL/LGRL instructions to LLILF so register R1 contains the
+ * address of the per-cpu / per-process stack canary:
+ *
+ * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary
+ */
+ canary = __LC_STACK_CANARY;
+ if (machine_has_relocated_lowcore())
+ canary += LOWCORE_ALT_ADDRESS;
+ for (loc = start; loc < end; loc++) {
+ insn = vaddress_to_insn(*loc);
+ rc = stack_protector_verify(insn, kernel_start);
+ if (rc)
+ return rc;
+ new = *insn;
+ new.opc1 = 0xc0;
+ new.opc2 = 0xf;
+ new.imm = canary;
+ if (stack_protector_debug)
+ stack_protector_dump(insn, &new);
+ s390_kernel_write(insn, &new, sizeof(*insn));
+ }
+ return 0;
+}
+
+#ifdef __DECOMPRESSOR
+void __stack_protector_apply_early(unsigned long kernel_start)
+{
+ unsigned long *start, *end;
+
+ start = (unsigned long *)vmlinux.stack_prot_start;
+ end = (unsigned long *)vmlinux.stack_prot_end;
+ __stack_protector_apply(start, end, kernel_start);
+}
+#endif
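
The patching depends on struct insn_ril matching the 6-byte big-endian RIL instruction format exactly. A small, hedged self-test of that layout, meant to run on a big-endian host such as s390 (the bitfield order does not hold on little-endian); the canary offset used at the end is a made-up value for illustration:

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Same layout as struct insn_ril in the file above. */
struct insn_ril {
	uint8_t opc1 : 8;
	uint8_t r1   : 4;
	uint8_t opc2 : 4;
	uint32_t imm;
} __attribute__((packed));

int main(void)
{
	/* Byte pattern of larl %r1 with a zero immediate:
	 * opcode byte 0xc0, then r1=1 and opc2=0x0 sharing the next byte.
	 */
	const uint8_t larl[6] = { 0xc0, 0x10, 0x00, 0x00, 0x00, 0x00 };
	struct insn_ril insn;

	assert(sizeof(insn) == 6);
	memcpy(&insn, larl, sizeof(insn));
	assert(insn.opc1 == 0xc0 && insn.r1 == 1 && insn.opc2 == 0x0);

	/* Rewrite to llilf %r1,<offset>, as __stack_protector_apply() does. */
	insn.opc2 = 0xf;
	insn.imm = 0x1000;	/* hypothetical canary offset, illustration only */
	return 0;
}
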
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b153a395f46d..3aae7f70e6ab 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -8,7 +8,6 @@
#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
-#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
@@ -107,8 +106,6 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
unsigned long ip, sp;
bool first = true;
- if (is_compat_task())
- return;
if (!current->mm)
return;
ip = instruction_pointer(regs);
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index f4ccdbed4b89..5eae2e25997a 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -253,7 +253,7 @@ static void fill_diag_mac(struct sthyi_sctns *sctns,
sctns->mac.infmval1 |= MAC_CNT_VLD;
}
-/* Returns a pointer to the the next partition block. */
+/* Returns a pointer to the next partition block. */
static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf,
bool this_lpar,
void *diag224_buf,
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index 4fee74553ca2..795b6cca74c9 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -39,6 +39,16 @@
#include "entry.h"
+#define __SYSCALL(nr, sym) long __s390x_##sym(struct pt_regs *);
+#include <asm/syscall_table.h>
+#undef __SYSCALL
+
+#define __SYSCALL(nr, sym) [nr] = (__s390x_##sym),
+const sys_call_ptr_t sys_call_table[__NR_syscalls] = {
+#include <asm/syscall_table.h>
+};
+#undef __SYSCALL
+
#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
@@ -122,7 +132,7 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
goto out;
regs->gprs[2] = -ENOSYS;
if (likely(nr < NR_syscalls))
- regs->gprs[2] = current->thread.sys_call_table[nr](regs);
+ regs->gprs[2] = sys_call_table[nr](regs);
out:
syscall_exit_to_user_mode(regs);
}
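
The table construction above is an X-macro: asm/syscall_table.h is included twice with different definitions of __SYSCALL, first to declare the __s390x_ entry points and then to emit designated initializers keyed by syscall number. A hedged miniature of the same pattern; the list, numbers and names below are made-up stand-ins, not the generated header:

#include <stdio.h>

/* Stand-in for the generated table: each line is X(<nr>, <entry>). */
#define SYSCALL_LIST(X)	\
	X(1, sys_exit)	\
	X(3, sys_read)

typedef long (*sys_call_ptr_t)(void *regs);

/* Pass 1: one entry point per table line (dummy bodies here). */
#define __SYSCALL(nr, sym) \
	static long demo_##sym(void *regs) { (void)regs; return nr; }
SYSCALL_LIST(__SYSCALL)
#undef __SYSCALL

/* Pass 2: designated initializers make the table indexable by number. */
#define __SYSCALL(nr, sym) [nr] = demo_##sym,
static const sys_call_ptr_t demo_table[4] = {
	SYSCALL_LIST(__SYSCALL)
};
#undef __SYSCALL

int main(void)
{
	printf("%ld\n", demo_table[3](NULL));	/* prints 3 */
	return 0;
}
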
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
index c5d958a09ff4..d5fca0ca0890 100644
--- a/arch/s390/kernel/syscalls/Makefile
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -1,48 +1,32 @@
# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
-gen := arch/$(ARCH)/include/generated
-kapi := $(gen)/asm
-uapi := $(gen)/uapi/asm
-
-syscall := $(src)/syscall.tbl
-systbl := $(src)/syscalltbl
-
-gen-y := $(kapi)/syscall_table.h
-kapi-hdrs-y := $(kapi)/unistd_nr.h
-uapi-hdrs-y := $(uapi)/unistd_32.h
-uapi-hdrs-y += $(uapi)/unistd_64.h
-
-targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
-
-PHONY += kapi uapi
-
-kapi: $(gen-y) $(kapi-hdrs-y)
-uapi: $(uapi-hdrs-y)
-
-
-# Create output directory if not already present
$(shell mkdir -p $(uapi) $(kapi))
-quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$@" < $< > $@
-
-quiet_cmd_sysnr = SYSNR $@
- cmd_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $< > $@
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
-quiet_cmd_syscalls = SYSTBL $@
- cmd_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $< > $@
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@
-syshdr_abi_unistd_32 := common,32
-$(uapi)/unistd_32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,syshdr)
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@
-syshdr_abi_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall) $(systbl) FORCE
+$(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
- $(call if_changed,syscalls)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_64.h
+kapisyshdr-y += syscall_table.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
-sysnr_abi_unistd_nr := common,32,64
-$(kapi)/unistd_nr.h: $(syscall) $(systbl) FORCE
- $(call if_changed,sysnr)
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+ @:
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 8a6744d658db..417ed16b3c63 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -3,472 +3,397 @@
# System call table for s390
#
# Format:
+# <nr> <abi> <syscall> <entry>
#
-# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
-#
-# where <abi> can be common, 64, or 32
+# <abi> is always common.
-1 common exit sys_exit sys_exit
-2 common fork sys_fork sys_fork
-3 common read sys_read compat_sys_s390_read
-4 common write sys_write compat_sys_s390_write
-5 common open sys_open compat_sys_open
-6 common close sys_close sys_close
-7 common restart_syscall sys_restart_syscall sys_restart_syscall
-8 common creat sys_creat sys_creat
-9 common link sys_link sys_link
-10 common unlink sys_unlink sys_unlink
-11 common execve sys_execve compat_sys_execve
-12 common chdir sys_chdir sys_chdir
-13 32 time - sys_time32
-14 common mknod sys_mknod sys_mknod
-15 common chmod sys_chmod sys_chmod
-16 32 lchown - sys_lchown16
-19 common lseek sys_lseek compat_sys_lseek
-20 common getpid sys_getpid sys_getpid
-21 common mount sys_mount sys_mount
-22 common umount sys_oldumount sys_oldumount
-23 32 setuid - sys_setuid16
-24 32 getuid - sys_getuid16
-25 32 stime - sys_stime32
-26 common ptrace sys_ptrace compat_sys_ptrace
-27 common alarm sys_alarm sys_alarm
-29 common pause sys_pause sys_pause
-30 common utime sys_utime sys_utime32
-33 common access sys_access sys_access
-34 common nice sys_nice sys_nice
-36 common sync sys_sync sys_sync
-37 common kill sys_kill sys_kill
-38 common rename sys_rename sys_rename
-39 common mkdir sys_mkdir sys_mkdir
-40 common rmdir sys_rmdir sys_rmdir
-41 common dup sys_dup sys_dup
-42 common pipe sys_pipe sys_pipe
-43 common times sys_times compat_sys_times
-45 common brk sys_brk sys_brk
-46 32 setgid - sys_setgid16
-47 32 getgid - sys_getgid16
-48 common signal sys_signal sys_signal
-49 32 geteuid - sys_geteuid16
-50 32 getegid - sys_getegid16
-51 common acct sys_acct sys_acct
-52 common umount2 sys_umount sys_umount
-54 common ioctl sys_ioctl compat_sys_ioctl
-55 common fcntl sys_fcntl compat_sys_fcntl
-57 common setpgid sys_setpgid sys_setpgid
-60 common umask sys_umask sys_umask
-61 common chroot sys_chroot sys_chroot
-62 common ustat sys_ustat compat_sys_ustat
-63 common dup2 sys_dup2 sys_dup2
-64 common getppid sys_getppid sys_getppid
-65 common getpgrp sys_getpgrp sys_getpgrp
-66 common setsid sys_setsid sys_setsid
-67 common sigaction sys_sigaction compat_sys_sigaction
-70 32 setreuid - sys_setreuid16
-71 32 setregid - sys_setregid16
-72 common sigsuspend sys_sigsuspend sys_sigsuspend
-73 common sigpending sys_sigpending compat_sys_sigpending
-74 common sethostname sys_sethostname sys_sethostname
-75 common setrlimit sys_setrlimit compat_sys_setrlimit
-76 32 getrlimit - compat_sys_old_getrlimit
-77 common getrusage sys_getrusage compat_sys_getrusage
-78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
-79 common settimeofday sys_settimeofday compat_sys_settimeofday
-80 32 getgroups - sys_getgroups16
-81 32 setgroups - sys_setgroups16
-83 common symlink sys_symlink sys_symlink
-85 common readlink sys_readlink sys_readlink
-86 common uselib sys_uselib sys_uselib
-87 common swapon sys_swapon sys_swapon
-88 common reboot sys_reboot sys_reboot
-89 common readdir - compat_sys_old_readdir
-90 common mmap sys_old_mmap compat_sys_s390_old_mmap
-91 common munmap sys_munmap sys_munmap
-92 common truncate sys_truncate compat_sys_truncate
-93 common ftruncate sys_ftruncate compat_sys_ftruncate
-94 common fchmod sys_fchmod sys_fchmod
-95 32 fchown - sys_fchown16
-96 common getpriority sys_getpriority sys_getpriority
-97 common setpriority sys_setpriority sys_setpriority
-99 common statfs sys_statfs compat_sys_statfs
-100 common fstatfs sys_fstatfs compat_sys_fstatfs
-101 32 ioperm - -
-102 common socketcall sys_socketcall compat_sys_socketcall
-103 common syslog sys_syslog sys_syslog
-104 common setitimer sys_setitimer compat_sys_setitimer
-105 common getitimer sys_getitimer compat_sys_getitimer
-106 common stat sys_newstat compat_sys_newstat
-107 common lstat sys_newlstat compat_sys_newlstat
-108 common fstat sys_newfstat compat_sys_newfstat
-110 common lookup_dcookie - -
-111 common vhangup sys_vhangup sys_vhangup
-112 common idle - -
-114 common wait4 sys_wait4 compat_sys_wait4
-115 common swapoff sys_swapoff sys_swapoff
-116 common sysinfo sys_sysinfo compat_sys_sysinfo
-117 common ipc sys_s390_ipc compat_sys_s390_ipc
-118 common fsync sys_fsync sys_fsync
-119 common sigreturn sys_sigreturn compat_sys_sigreturn
-120 common clone sys_clone sys_clone
-121 common setdomainname sys_setdomainname sys_setdomainname
-122 common uname sys_newuname sys_newuname
-124 common adjtimex sys_adjtimex sys_adjtimex_time32
-125 common mprotect sys_mprotect sys_mprotect
-126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
-127 common create_module - -
-128 common init_module sys_init_module sys_init_module
-129 common delete_module sys_delete_module sys_delete_module
-130 common get_kernel_syms - -
-131 common quotactl sys_quotactl sys_quotactl
-132 common getpgid sys_getpgid sys_getpgid
-133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush sys_ni_syscall sys_ni_syscall
-135 common sysfs sys_sysfs sys_sysfs
-136 common personality sys_s390_personality sys_s390_personality
-137 common afs_syscall - -
-138 32 setfsuid - sys_setfsuid16
-139 32 setfsgid - sys_setfsgid16
-140 32 _llseek - sys_llseek
-141 common getdents sys_getdents compat_sys_getdents
-142 32 _newselect - compat_sys_select
-142 64 select sys_select -
-143 common flock sys_flock sys_flock
-144 common msync sys_msync sys_msync
-145 common readv sys_readv sys_readv
-146 common writev sys_writev sys_writev
-147 common getsid sys_getsid sys_getsid
-148 common fdatasync sys_fdatasync sys_fdatasync
-149 common _sysctl - -
-150 common mlock sys_mlock sys_mlock
-151 common munlock sys_munlock sys_munlock
-152 common mlockall sys_mlockall sys_mlockall
-153 common munlockall sys_munlockall sys_munlockall
-154 common sched_setparam sys_sched_setparam sys_sched_setparam
-155 common sched_getparam sys_sched_getparam sys_sched_getparam
-156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler
-157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
-158 common sched_yield sys_sched_yield sys_sched_yield
-159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
-160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32
-162 common nanosleep sys_nanosleep sys_nanosleep_time32
-163 common mremap sys_mremap sys_mremap
-164 32 setresuid - sys_setresuid16
-165 32 getresuid - sys_getresuid16
-167 common query_module - -
-168 common poll sys_poll sys_poll
-169 common nfsservctl - -
-170 32 setresgid - sys_setresgid16
-171 32 getresgid - sys_getresgid16
-172 common prctl sys_prctl sys_prctl
-173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
-174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
-175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
-176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
-178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
-179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-180 common pread64 sys_pread64 compat_sys_s390_pread64
-181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
-182 32 chown - sys_chown16
-183 common getcwd sys_getcwd sys_getcwd
-184 common capget sys_capget sys_capget
-185 common capset sys_capset sys_capset
-186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
-187 common sendfile sys_sendfile64 compat_sys_sendfile
-188 common getpmsg - -
-189 common putpmsg - -
-190 common vfork sys_vfork sys_vfork
-191 32 ugetrlimit - compat_sys_getrlimit
-191 64 getrlimit sys_getrlimit -
-192 32 mmap2 - compat_sys_s390_mmap2
-193 32 truncate64 - compat_sys_s390_truncate64
-194 32 ftruncate64 - compat_sys_s390_ftruncate64
-195 32 stat64 - compat_sys_s390_stat64
-196 32 lstat64 - compat_sys_s390_lstat64
-197 32 fstat64 - compat_sys_s390_fstat64
-198 32 lchown32 - sys_lchown
-198 64 lchown sys_lchown -
-199 32 getuid32 - sys_getuid
-199 64 getuid sys_getuid -
-200 32 getgid32 - sys_getgid
-200 64 getgid sys_getgid -
-201 32 geteuid32 - sys_geteuid
-201 64 geteuid sys_geteuid -
-202 32 getegid32 - sys_getegid
-202 64 getegid sys_getegid -
-203 32 setreuid32 - sys_setreuid
-203 64 setreuid sys_setreuid -
-204 32 setregid32 - sys_setregid
-204 64 setregid sys_setregid -
-205 32 getgroups32 - sys_getgroups
-205 64 getgroups sys_getgroups -
-206 32 setgroups32 - sys_setgroups
-206 64 setgroups sys_setgroups -
-207 32 fchown32 - sys_fchown
-207 64 fchown sys_fchown -
-208 32 setresuid32 - sys_setresuid
-208 64 setresuid sys_setresuid -
-209 32 getresuid32 - sys_getresuid
-209 64 getresuid sys_getresuid -
-210 32 setresgid32 - sys_setresgid
-210 64 setresgid sys_setresgid -
-211 32 getresgid32 - sys_getresgid
-211 64 getresgid sys_getresgid -
-212 32 chown32 - sys_chown
-212 64 chown sys_chown -
-213 32 setuid32 - sys_setuid
-213 64 setuid sys_setuid -
-214 32 setgid32 - sys_setgid
-214 64 setgid sys_setgid -
-215 32 setfsuid32 - sys_setfsuid
-215 64 setfsuid sys_setfsuid -
-216 32 setfsgid32 - sys_setfsgid
-216 64 setfsgid sys_setfsgid -
-217 common pivot_root sys_pivot_root sys_pivot_root
-218 common mincore sys_mincore sys_mincore
-219 common madvise sys_madvise sys_madvise
-220 common getdents64 sys_getdents64 sys_getdents64
-221 32 fcntl64 - compat_sys_fcntl64
-222 common readahead sys_readahead compat_sys_s390_readahead
-223 32 sendfile64 - compat_sys_sendfile64
-224 common setxattr sys_setxattr sys_setxattr
-225 common lsetxattr sys_lsetxattr sys_lsetxattr
-226 common fsetxattr sys_fsetxattr sys_fsetxattr
-227 common getxattr sys_getxattr sys_getxattr
-228 common lgetxattr sys_lgetxattr sys_lgetxattr
-229 common fgetxattr sys_fgetxattr sys_fgetxattr
-230 common listxattr sys_listxattr sys_listxattr
-231 common llistxattr sys_llistxattr sys_llistxattr
-232 common flistxattr sys_flistxattr sys_flistxattr
-233 common removexattr sys_removexattr sys_removexattr
-234 common lremovexattr sys_lremovexattr sys_lremovexattr
-235 common fremovexattr sys_fremovexattr sys_fremovexattr
-236 common gettid sys_gettid sys_gettid
-237 common tkill sys_tkill sys_tkill
-238 common futex sys_futex sys_futex_time32
-239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
-240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
-241 common tgkill sys_tgkill sys_tgkill
-243 common io_setup sys_io_setup compat_sys_io_setup
-244 common io_destroy sys_io_destroy sys_io_destroy
-245 common io_getevents sys_io_getevents sys_io_getevents_time32
-246 common io_submit sys_io_submit compat_sys_io_submit
-247 common io_cancel sys_io_cancel sys_io_cancel
-248 common exit_group sys_exit_group sys_exit_group
-249 common epoll_create sys_epoll_create sys_epoll_create
-250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl
-251 common epoll_wait sys_epoll_wait sys_epoll_wait
-252 common set_tid_address sys_set_tid_address sys_set_tid_address
-253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
-254 common timer_create sys_timer_create compat_sys_timer_create
-255 common timer_settime sys_timer_settime sys_timer_settime32
-256 common timer_gettime sys_timer_gettime sys_timer_gettime32
-257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
-258 common timer_delete sys_timer_delete sys_timer_delete
-259 common clock_settime sys_clock_settime sys_clock_settime32
-260 common clock_gettime sys_clock_gettime sys_clock_gettime32
-261 common clock_getres sys_clock_getres sys_clock_getres_time32
-262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32
-264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
-265 common statfs64 sys_statfs64 compat_sys_statfs64
-266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
-268 common mbind sys_mbind sys_mbind
-269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
-271 common mq_open sys_mq_open compat_sys_mq_open
-272 common mq_unlink sys_mq_unlink sys_mq_unlink
-273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
-274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32
-275 common mq_notify sys_mq_notify compat_sys_mq_notify
-276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
-277 common kexec_load sys_kexec_load compat_sys_kexec_load
-278 common add_key sys_add_key sys_add_key
-279 common request_key sys_request_key sys_request_key
-280 common keyctl sys_keyctl compat_sys_keyctl
-281 common waitid sys_waitid compat_sys_waitid
-282 common ioprio_set sys_ioprio_set sys_ioprio_set
-283 common ioprio_get sys_ioprio_get sys_ioprio_get
-284 common inotify_init sys_inotify_init sys_inotify_init
-285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
-286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages sys_migrate_pages
-288 common openat sys_openat compat_sys_openat
-289 common mkdirat sys_mkdirat sys_mkdirat
-290 common mknodat sys_mknodat sys_mknodat
-291 common fchownat sys_fchownat sys_fchownat
-292 common futimesat sys_futimesat sys_futimesat_time32
-293 32 fstatat64 - compat_sys_s390_fstatat64
-293 64 newfstatat sys_newfstatat -
-294 common unlinkat sys_unlinkat sys_unlinkat
-295 common renameat sys_renameat sys_renameat
-296 common linkat sys_linkat sys_linkat
-297 common symlinkat sys_symlinkat sys_symlinkat
-298 common readlinkat sys_readlinkat sys_readlinkat
-299 common fchmodat sys_fchmodat sys_fchmodat
-300 common faccessat sys_faccessat sys_faccessat
-301 common pselect6 sys_pselect6 compat_sys_pselect6_time32
-302 common ppoll sys_ppoll compat_sys_ppoll_time32
-303 common unshare sys_unshare sys_unshare
-304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-306 common splice sys_splice sys_splice
-307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
-308 common tee sys_tee sys_tee
-309 common vmsplice sys_vmsplice sys_vmsplice
-310 common move_pages sys_move_pages sys_move_pages
-311 common getcpu sys_getcpu sys_getcpu
-312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-313 common utimes sys_utimes sys_utimes_time32
-314 common fallocate sys_fallocate compat_sys_s390_fallocate
-315 common utimensat sys_utimensat sys_utimensat_time32
-316 common signalfd sys_signalfd compat_sys_signalfd
-317 common timerfd - -
-318 common eventfd sys_eventfd sys_eventfd
-319 common timerfd_create sys_timerfd_create sys_timerfd_create
-320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32
-321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32
-322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
-323 common eventfd2 sys_eventfd2 sys_eventfd2
-324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
-325 common pipe2 sys_pipe2 sys_pipe2
-326 common dup3 sys_dup3 sys_dup3
-327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
-328 common preadv sys_preadv compat_sys_preadv
-329 common pwritev sys_pwritev compat_sys_pwritev
-330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-331 common perf_event_open sys_perf_event_open sys_perf_event_open
-332 common fanotify_init sys_fanotify_init sys_fanotify_init
-333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
-334 common prlimit64 sys_prlimit64 sys_prlimit64
-335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at
-336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32
-338 common syncfs sys_syncfs sys_syncfs
-339 common setns sys_setns sys_setns
-340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv
-341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev
-342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
-343 common kcmp sys_kcmp sys_kcmp
-344 common finit_module sys_finit_module sys_finit_module
-345 common sched_setattr sys_sched_setattr sys_sched_setattr
-346 common sched_getattr sys_sched_getattr sys_sched_getattr
-347 common renameat2 sys_renameat2 sys_renameat2
-348 common seccomp sys_seccomp sys_seccomp
-349 common getrandom sys_getrandom sys_getrandom
-350 common memfd_create sys_memfd_create sys_memfd_create
-351 common bpf sys_bpf sys_bpf
-352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write
-353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read
-354 common execveat sys_execveat compat_sys_execveat
-355 common userfaultfd sys_userfaultfd sys_userfaultfd
-356 common membarrier sys_membarrier sys_membarrier
-357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32
-358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
-359 common socket sys_socket sys_socket
-360 common socketpair sys_socketpair sys_socketpair
-361 common bind sys_bind sys_bind
-362 common connect sys_connect sys_connect
-363 common listen sys_listen sys_listen
-364 common accept4 sys_accept4 sys_accept4
-365 common getsockopt sys_getsockopt sys_getsockopt
-366 common setsockopt sys_setsockopt sys_setsockopt
-367 common getsockname sys_getsockname sys_getsockname
-368 common getpeername sys_getpeername sys_getpeername
-369 common sendto sys_sendto sys_sendto
-370 common sendmsg sys_sendmsg compat_sys_sendmsg
-371 common recvfrom sys_recvfrom compat_sys_recvfrom
-372 common recvmsg sys_recvmsg compat_sys_recvmsg
-373 common shutdown sys_shutdown sys_shutdown
-374 common mlock2 sys_mlock2 sys_mlock2
-375 common copy_file_range sys_copy_file_range sys_copy_file_range
-376 common preadv2 sys_preadv2 compat_sys_preadv2
-377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
-378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage
-379 common statx sys_statx sys_statx
-380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi
-381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
-382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
-383 common rseq sys_rseq sys_rseq
-384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect
-385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc
-386 common pkey_free sys_pkey_free sys_pkey_free
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common restart_syscall sys_restart_syscall
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+29 common pause sys_pause
+30 common utime sys_utime
+33 common access sys_access
+34 common nice sys_nice
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+45 common brk sys_brk
+48 common signal sys_signal
+51 common acct sys_acct
+52 common umount2 sys_umount
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+57 common setpgid sys_setpgid
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+83 common symlink sys_symlink
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_ni_syscall
+90 common mmap sys_old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+110 common lookup_dcookie sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_s390_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+124 common adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+127 common create_module sys_ni_syscall
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_s390_personality
+137 common afs_syscall sys_ni_syscall
+141 common getdents sys_getdents
+142 common select sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep
+163 common mremap sys_mremap
+167 common query_module sys_ni_syscall
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64
+181 common pwrite64 sys_pwrite64
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile64
+188 common getpmsg sys_ni_syscall
+189 common putpmsg sys_ni_syscall
+190 common vfork sys_vfork
+191 common getrlimit sys_getrlimit
+198 common lchown sys_lchown
+199 common getuid sys_getuid
+200 common getgid sys_getgid
+201 common geteuid sys_geteuid
+202 common getegid sys_getegid
+203 common setreuid sys_setreuid
+204 common setregid sys_setregid
+205 common getgroups sys_getgroups
+206 common setgroups sys_setgroups
+207 common fchown sys_fchown
+208 common setresuid sys_setresuid
+209 common getresuid sys_getresuid
+210 common setresgid sys_setresgid
+211 common getresgid sys_getresgid
+212 common chown sys_chown
+213 common setuid sys_setuid
+214 common setgid sys_setgid
+215 common setfsuid sys_setfsuid
+216 common setfsgid sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+222 common readahead sys_readahead
+224 common setxattr sys_setxattr
+225 common lsetxattr sys_lsetxattr
+226 common fsetxattr sys_fsetxattr
+227 common getxattr sys_getxattr
+228 common lgetxattr sys_lgetxattr
+229 common fgetxattr sys_fgetxattr
+230 common listxattr sys_listxattr
+231 common llistxattr sys_llistxattr
+232 common flistxattr sys_flistxattr
+233 common removexattr sys_removexattr
+234 common lremovexattr sys_lremovexattr
+235 common fremovexattr sys_fremovexattr
+236 common gettid sys_gettid
+237 common tkill sys_tkill
+238 common futex sys_futex
+239 common sched_setaffinity sys_sched_setaffinity
+240 common sched_getaffinity sys_sched_getaffinity
+241 common tgkill sys_tgkill
+243 common io_setup sys_io_setup
+244 common io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents
+246 common io_submit sys_io_submit
+247 common io_cancel sys_io_cancel
+248 common exit_group sys_exit_group
+249 common epoll_create sys_epoll_create
+250 common epoll_ctl sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait
+252 common set_tid_address sys_set_tid_address
+253 common fadvise64 sys_fadvise64_64
+254 common timer_create sys_timer_create
+255 common timer_settime sys_timer_settime
+256 common timer_gettime sys_timer_gettime
+257 common timer_getoverrun sys_timer_getoverrun
+258 common timer_delete sys_timer_delete
+259 common clock_settime sys_clock_settime
+260 common clock_gettime sys_clock_gettime
+261 common clock_getres sys_clock_getres
+262 common clock_nanosleep sys_clock_nanosleep
+265 common statfs64 sys_statfs64
+266 common fstatfs64 sys_fstatfs64
+267 common remap_file_pages sys_remap_file_pages
+268 common mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy
+271 common mq_open sys_mq_open
+272 common mq_unlink sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend
+274 common mq_timedreceive sys_mq_timedreceive
+275 common mq_notify sys_mq_notify
+276 common mq_getsetattr sys_mq_getsetattr
+277 common kexec_load sys_kexec_load
+278 common add_key sys_add_key
+279 common request_key sys_request_key
+280 common keyctl sys_keyctl
+281 common waitid sys_waitid
+282 common ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get
+284 common inotify_init sys_inotify_init
+285 common inotify_add_watch sys_inotify_add_watch
+286 common inotify_rm_watch sys_inotify_rm_watch
+287 common migrate_pages sys_migrate_pages
+288 common openat sys_openat
+289 common mkdirat sys_mkdirat
+290 common mknodat sys_mknodat
+291 common fchownat sys_fchownat
+292 common futimesat sys_futimesat
+293 common newfstatat sys_newfstatat
+294 common unlinkat sys_unlinkat
+295 common renameat sys_renameat
+296 common linkat sys_linkat
+297 common symlinkat sys_symlinkat
+298 common readlinkat sys_readlinkat
+299 common fchmodat sys_fchmodat
+300 common faccessat sys_faccessat
+301 common pselect6 sys_pselect6
+302 common ppoll sys_ppoll
+303 common unshare sys_unshare
+304 common set_robust_list sys_set_robust_list
+305 common get_robust_list sys_get_robust_list
+306 common splice sys_splice
+307 common sync_file_range sys_sync_file_range
+308 common tee sys_tee
+309 common vmsplice sys_vmsplice
+310 common move_pages sys_move_pages
+311 common getcpu sys_getcpu
+312 common epoll_pwait sys_epoll_pwait
+313 common utimes sys_utimes
+314 common fallocate sys_fallocate
+315 common utimensat sys_utimensat
+316 common signalfd sys_signalfd
+317 common timerfd sys_ni_syscall
+318 common eventfd sys_eventfd
+319 common timerfd_create sys_timerfd_create
+320 common timerfd_settime sys_timerfd_settime
+321 common timerfd_gettime sys_timerfd_gettime
+322 common signalfd4 sys_signalfd4
+323 common eventfd2 sys_eventfd2
+324 common inotify_init1 sys_inotify_init1
+325 common pipe2 sys_pipe2
+326 common dup3 sys_dup3
+327 common epoll_create1 sys_epoll_create1
+328 common preadv sys_preadv
+329 common pwritev sys_pwritev
+330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+331 common perf_event_open sys_perf_event_open
+332 common fanotify_init sys_fanotify_init
+333 common fanotify_mark sys_fanotify_mark
+334 common prlimit64 sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at
+336 common open_by_handle_at sys_open_by_handle_at
+337 common clock_adjtime sys_clock_adjtime
+338 common syncfs sys_syncfs
+339 common setns sys_setns
+340 common process_vm_readv sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev
+342 common s390_runtime_instr sys_s390_runtime_instr
+343 common kcmp sys_kcmp
+344 common finit_module sys_finit_module
+345 common sched_setattr sys_sched_setattr
+346 common sched_getattr sys_sched_getattr
+347 common renameat2 sys_renameat2
+348 common seccomp sys_seccomp
+349 common getrandom sys_getrandom
+350 common memfd_create sys_memfd_create
+351 common bpf sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read
+354 common execveat sys_execveat
+355 common userfaultfd sys_userfaultfd
+356 common membarrier sys_membarrier
+357 common recvmmsg sys_recvmmsg
+358 common sendmmsg sys_sendmmsg
+359 common socket sys_socket
+360 common socketpair sys_socketpair
+361 common bind sys_bind
+362 common connect sys_connect
+363 common listen sys_listen
+364 common accept4 sys_accept4
+365 common getsockopt sys_getsockopt
+366 common setsockopt sys_setsockopt
+367 common getsockname sys_getsockname
+368 common getpeername sys_getpeername
+369 common sendto sys_sendto
+370 common sendmsg sys_sendmsg
+371 common recvfrom sys_recvfrom
+372 common recvmsg sys_recvmsg
+373 common shutdown sys_shutdown
+374 common mlock2 sys_mlock2
+375 common copy_file_range sys_copy_file_range
+376 common preadv2 sys_preadv2
+377 common pwritev2 sys_pwritev2
+378 common s390_guarded_storage sys_s390_guarded_storage
+379 common statx sys_statx
+380 common s390_sthyi sys_s390_sthyi
+381 common kexec_file_load sys_kexec_file_load
+382 common io_pgetevents sys_io_pgetevents
+383 common rseq sys_rseq
+384 common pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free
# room for arch specific syscalls
-392 64 semtimedop sys_semtimedop -
-393 common semget sys_semget sys_semget
-394 common semctl sys_semctl compat_sys_semctl
-395 common shmget sys_shmget sys_shmget
-396 common shmctl sys_shmctl compat_sys_shmctl
-397 common shmat sys_shmat compat_sys_shmat
-398 common shmdt sys_shmdt sys_shmdt
-399 common msgget sys_msgget sys_msgget
-400 common msgsnd sys_msgsnd compat_sys_msgsnd
-401 common msgrcv sys_msgrcv compat_sys_msgrcv
-402 common msgctl sys_msgctl compat_sys_msgctl
-403 32 clock_gettime64 - sys_clock_gettime
-404 32 clock_settime64 - sys_clock_settime
-405 32 clock_adjtime64 - sys_clock_adjtime
-406 32 clock_getres_time64 - sys_clock_getres
-407 32 clock_nanosleep_time64 - sys_clock_nanosleep
-408 32 timer_gettime64 - sys_timer_gettime
-409 32 timer_settime64 - sys_timer_settime
-410 32 timerfd_gettime64 - sys_timerfd_gettime
-411 32 timerfd_settime64 - sys_timerfd_settime
-412 32 utimensat_time64 - sys_utimensat
-413 32 pselect6_time64 - compat_sys_pselect6_time64
-414 32 ppoll_time64 - compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
-417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
-418 32 mq_timedsend_time64 - sys_mq_timedsend
-419 32 mq_timedreceive_time64 - sys_mq_timedreceive
-420 32 semtimedop_time64 - sys_semtimedop
-421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
-422 32 futex_time64 - sys_futex
-423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
-424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal
-425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
-426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
-427 common io_uring_register sys_io_uring_register sys_io_uring_register
-428 common open_tree sys_open_tree sys_open_tree
-429 common move_mount sys_move_mount sys_move_mount
-430 common fsopen sys_fsopen sys_fsopen
-431 common fsconfig sys_fsconfig sys_fsconfig
-432 common fsmount sys_fsmount sys_fsmount
-433 common fspick sys_fspick sys_fspick
-434 common pidfd_open sys_pidfd_open sys_pidfd_open
-435 common clone3 sys_clone3 sys_clone3
-436 common close_range sys_close_range sys_close_range
-437 common openat2 sys_openat2 sys_openat2
-438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd
-439 common faccessat2 sys_faccessat2 sys_faccessat2
-440 common process_madvise sys_process_madvise sys_process_madvise
-441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
-442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
-444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
-445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
-446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
-447 common memfd_secret sys_memfd_secret sys_memfd_secret
-448 common process_mrelease sys_process_mrelease sys_process_mrelease
-449 common futex_waitv sys_futex_waitv sys_futex_waitv
-450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
-451 common cachestat sys_cachestat sys_cachestat
-452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
-453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack
-454 common futex_wake sys_futex_wake sys_futex_wake
-455 common futex_wait sys_futex_wait sys_futex_wait
-456 common futex_requeue sys_futex_requeue sys_futex_requeue
-457 common statmount sys_statmount sys_statmount
-458 common listmount sys_listmount sys_listmount
-459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
-460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
-461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
-462 common mseal sys_mseal sys_mseal
-463 common setxattrat sys_setxattrat sys_setxattrat
-464 common getxattrat sys_getxattrat sys_getxattrat
-465 common listxattrat sys_listxattrat sys_listxattrat
-466 common removexattrat sys_removexattrat sys_removexattrat
-467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
-468 common file_getattr sys_file_getattr sys_file_getattr
-469 common file_setattr sys_file_setattr sys_file_setattr
+392 common semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl
+397 common shmat sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd
+401 common msgrcv sys_msgrcv
+402 common msgctl sys_msgctl
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
+470 common listns sys_listns
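
The rewritten syscall.tbl drops the separate 64-bit and compat entry-point columns, leaving one entry point per line in the form "number abi name entry"; compat-only calls are either dropped or routed to sys_ni_syscall. An illustrative lookup against the new four-column format, run from the kernel source tree (with the table above it prints 288):

    awk '$3 == "openat" { print $1 }' arch/s390/kernel/syscalls/syscall.tbl
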
diff --git a/arch/s390/kernel/syscalls/syscalltbl b/arch/s390/kernel/syscalls/syscalltbl
deleted file mode 100755
index fbac1732f874..000000000000
--- a/arch/s390/kernel/syscalls/syscalltbl
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Generate system call table and header files
-#
-# Copyright IBM Corp. 2018
-# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
-
-#
-# File path to the system call table definition.
-# You can set the path with the -i option. If omitted,
-# system call table definitions are read from standard input.
-#
-SYSCALL_TBL=""
-
-
-create_syscall_table_entries()
-{
- local nr abi name entry64 entry32 _ignore
- local temp=$(mktemp ${TMPDIR:-/tmp}/syscalltbl-common.XXXXXXXXX)
-
- (
- #
- # Initialize with 0 to create an NI_SYSCALL for 0
- #
- local prev_nr=0 prev_32=sys_ni_syscall prev_64=sys_ni_syscall
- while read nr abi name entry64 entry32 _ignore; do
- test x$entry32 = x- && entry32=sys_ni_syscall
- test x$entry64 = x- && entry64=sys_ni_syscall
-
- if test $prev_nr -eq $nr; then
- #
- # Same syscall but different ABI, just update
- # the respective entry point
- #
- case $abi in
- 32)
- prev_32=$entry32
- ;;
- 64)
- prev_64=$entry64
- ;;
- esac
- continue;
- else
- printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32
- fi
-
- prev_nr=$nr
- prev_64=$entry64
- prev_32=$entry32
- done
- printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32
- ) >> $temp
-
- #
- # Check for duplicate syscall numbers
- #
- if ! cat $temp |cut -f1 |uniq -d 2>&1; then
- echo "Error: generated system call table contains duplicate entries: $temp" >&2
- exit 1
- fi
-
- #
- # Generate syscall table
- #
- prev_nr=0
- while read nr entry64 entry32; do
- while test $prev_nr -lt $((nr - 1)); do
- printf "NI_SYSCALL\n"
- prev_nr=$((prev_nr + 1))
- done
- if test x$entry64 = xsys_ni_syscall &&
- test x$entry32 = xsys_ni_syscall; then
- printf "NI_SYSCALL\n"
- else
- printf "SYSCALL(%s,%s)\n" $entry64 $entry32
- fi
- prev_nr=$nr
- done < $temp
- rm $temp
-}
-
-generate_syscall_table()
-{
- cat <<-EoHEADER
- /* SPDX-License-Identifier: GPL-2.0 */
- /*
- * Definitions for sys_call_table, each line represents an
- * entry in the table in the form
- * SYSCALL(64 bit syscall, 31 bit emulated syscall)
- *
- * This file is meant to be included from entry.S.
- */
-
- #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
-
-EoHEADER
- grep -Ev '^(#|[[:blank:]]*$)' $SYSCALL_TBL \
- |sort -k1 -n \
- |create_syscall_table_entries
-}
-
-create_header_defines()
-{
- local nr abi name _ignore
-
- while read nr abi name _ignore; do
- printf "#define __NR_%s %d\n" $name $nr
- done
-}
-
-normalize_fileguard()
-{
- local fileguard="$1"
-
- echo "$1" |tr '[[:lower:]]' '[[:upper:]]' \
- |sed -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'
-}
-
-generate_syscall_header()
-{
- local abis=$(echo "($1)" | tr ',' '|')
- local filename="$2"
- local fileguard suffix
-
- if test "$filename"; then
- fileguard=$(normalize_fileguard "__UAPI_ASM_S390_$2")
- else
- case "$abis" in
- *64*) suffix=64 ;;
- *32*) suffix=32 ;;
- esac
- fileguard=$(normalize_fileguard "__UAPI_ASM_S390_SYSCALLS_$suffix")
- fi
-
- cat <<-EoHEADER
- /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
- #ifndef ${fileguard}
- #define ${fileguard}
-
-EoHEADER
-
- grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \
- |sort -k1 -n \
- |create_header_defines
-
- cat <<-EoFOOTER
-
- #endif /* ${fileguard} */
-EoFOOTER
-}
-
-__max_syscall_nr()
-{
- local abis=$(echo "($1)" | tr ',' '|')
-
- grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \
- |sed -ne 's/^\([[:digit:]]*\)[[:space:]].*/\1/p' \
- |sort -n \
- |tail -1
-}
-
-
-generate_syscall_nr()
-{
- local abis="$1"
- local max_syscall_nr num_syscalls
-
- max_syscall_nr=$(__max_syscall_nr "$abis")
- num_syscalls=$((max_syscall_nr + 1))
-
- cat <<-EoHEADER
- /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
- #ifndef __ASM_S390_SYSCALLS_NR
- #define __ASM_S390_SYSCALLS_NR
-
- #define NR_syscalls ${num_syscalls}
-
- #endif /* __ASM_S390_SYSCALLS_NR */
-EoHEADER
-}
-
-
-#
-# Parse command line arguments
-#
-do_syscall_header=""
-do_syscall_table=""
-do_syscall_nr=""
-output_file=""
-abi_list="common,64"
-filename=""
-while getopts ":HNSXi:a:f:" arg; do
- case $arg in
- a)
- abi_list="$OPTARG"
- ;;
- i)
- SYSCALL_TBL="$OPTARG"
- ;;
- f)
- filename=${OPTARG##*/}
- ;;
- H)
- do_syscall_header=1
- ;;
- N)
- do_syscall_nr=1
- ;;
- S)
- do_syscall_table=1
- ;;
- X)
- set -x
- ;;
- :)
- echo "Missing argument for -$OPTARG" >&2
- exit 1
- ;;
- \?)
- echo "Invalid option specified" >&2
- exit 1
- ;;
- esac
-done
-
-test "$do_syscall_header" && generate_syscall_header "$abi_list" "$filename"
-test "$do_syscall_table" && generate_syscall_table
-test "$do_syscall_nr" && generate_syscall_nr "$abi_list"
-
-exit 0
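
The deleted script's core job was to sort the table numerically, merge per-ABI rows, pad holes in the numbering with NI_SYSCALL, and emit one SYSCALL(entry64,entry32) line per call for inclusion from entry.S. A minimal sketch of just the padding and emission step, using a contrived two-row input in the old five-column format (number 2 is left out on purpose to show the padding; the duplicate check and ABI merge are omitted):

    printf '1 common exit sys_exit sys_exit\n3 common read sys_read sys_read\n' |
    sort -k1 -n |
    awk '{
        # fill unused syscall numbers with NI_SYSCALL placeholders
        for (i = prev + 1; i < $1; i++)
            print "NI_SYSCALL"
        # column 4 is the 64-bit entry point, column 5 the 31-bit one
        print "SYSCALL(" $4 "," $5 ")"
        prev = $1
    }'

which prints SYSCALL(sys_exit,sys_exit), NI_SYSCALL, SYSCALL(sys_read,sys_read).
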
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 1ea84e942bd4..33ca3e47a0e6 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -526,7 +526,7 @@ static __init int stsi_init_debugfs(void)
if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && cpu_has_topology()) {
char link_to[10];
- sprintf(link_to, "15_1_%d", topology_mnest_limit());
+ snprintf(link_to, sizeof(link_to), "15_1_%d", topology_mnest_limit());
debugfs_create_symlink("topology", stsi_root, link_to);
}
return 0;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 63517b85f4c9..bd0df61d1907 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -12,8 +12,7 @@
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
-#define KMSG_COMPONENT "time"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "time: " fmt
#include <linux/kernel_stat.h>
#include <linux/errno.h>
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 1594c80e9bc4..1913a5566ac2 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -3,8 +3,7 @@
* Copyright IBM Corp. 2007, 2011
*/
-#define KMSG_COMPONENT "cpu"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "cpu: " fmt
#include <linux/cpufeature.h>
#include <linux/workqueue.h>
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 5b0633ea8d93..c624f3361e43 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -8,7 +8,6 @@
#include <linux/uaccess.h>
#include <linux/uprobes.h>
-#include <linux/compat.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
@@ -29,7 +28,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
return -EINVAL;
- if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
+ if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
return -EINVAL;
clear_thread_flag(TIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).per;
@@ -161,11 +160,6 @@ bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
/* Instruction Emulation */
-static void adjust_psw_addr(psw_t *psw, unsigned long len)
-{
- psw->addr = __rewind_psw(*psw, -len);
-}
-
#define EMU_ILLEGAL_OP 1
#define EMU_SPECIFICATION 2
#define EMU_ADDRESSING 3
@@ -353,7 +347,7 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
}
break;
}
- adjust_psw_addr(&regs->psw, ilen);
+ regs->psw.addr = __forward_psw(regs->psw, ilen);
switch (rc) {
case EMU_ILLEGAL_OP:
regs->int_code = ilen << 16 | 0x0001;
@@ -373,8 +367,7 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
- ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
- !is_compat_task())) {
+ (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)) {
regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
return true;
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 5d17609bcfe1..ed46950be86f 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -4,8 +4,7 @@
*
* Copyright IBM Corp. 2019, 2024
*/
-#define KMSG_COMPONENT "prot_virt"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "prot_virt: " fmt
#include <linux/export.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 430feb1a5013..a27a90a199be 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -7,7 +7,6 @@
*/
#include <linux/binfmts.h>
-#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -23,8 +22,7 @@
#include <asm/alternative.h>
#include <asm/vdso.h>
-extern char vdso64_start[], vdso64_end[];
-extern char vdso32_start[], vdso32_end[];
+extern char vdso_start[], vdso_end[];
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
@@ -33,12 +31,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-static struct vm_special_mapping vdso64_mapping = {
- .name = "[vdso]",
- .mremap = vdso_mremap,
-};
-
-static struct vm_special_mapping vdso32_mapping = {
+static struct vm_special_mapping vdso_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
@@ -53,7 +46,6 @@ early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
unsigned long vvar_start, vdso_text_start, vdso_text_len;
- struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
@@ -62,13 +54,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
if (mmap_write_lock_killable(mm))
return -EINTR;
- if (is_compat_task()) {
- vdso_text_len = vdso32_end - vdso32_start;
- vdso_mapping = &vdso32_mapping;
- } else {
- vdso_text_len = vdso64_end - vdso64_start;
- vdso_mapping = &vdso64_mapping;
- }
+ vdso_text_len = vdso_end - vdso_start;
vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
rc = vvar_start;
if (IS_ERR_VALUE(vvar_start))
@@ -82,7 +68,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdso_mapping);
+ &vdso_mapping);
if (IS_ERR(vma)) {
do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
rc = PTR_ERR(vma);
@@ -122,13 +108,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
unsigned long vdso_text_size(void)
{
- unsigned long size;
-
- if (is_compat_task())
- size = vdso32_end - vdso32_start;
- else
- size = vdso64_end - vdso64_start;
- return PAGE_ALIGN(size);
+ return PAGE_ALIGN(vdso_end - vdso_start);
}
unsigned long vdso_size(void)
@@ -166,7 +146,7 @@ static void vdso_apply_alternatives(void)
struct alt_instr *start, *end;
const struct elf64_hdr *hdr;
- hdr = (struct elf64_hdr *)vdso64_start;
+ hdr = (struct elf64_hdr *)vdso_start;
shdr = (void *)hdr + hdr->e_shoff;
alt = find_section(hdr, shdr, ".altinstructions");
if (!alt)
@@ -179,9 +159,7 @@ static void vdso_apply_alternatives(void)
static int __init vdso_init(void)
{
vdso_apply_alternatives();
- vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
- if (IS_ENABLED(CONFIG_COMPAT))
- vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
+ vdso_mapping.pages = vdso_setup_pages(vdso_start, vdso_end);
return 0;
}
arch_initcall(vdso_init);
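
The merged mapping keeps the "[vdso]" name, so the region should still be reported to user space exactly as before; a quick check on a running system:

    grep '\[vdso\]' /proc/self/maps
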
diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso/.gitignore
index 5167384843b9..652e31d82582 100644
--- a/arch/s390/kernel/vdso32/.gitignore
+++ b/arch/s390/kernel/vdso/.gitignore
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-vdso32.lds
+vdso.lds
diff --git a/arch/s390/kernel/vdso/Makefile b/arch/s390/kernel/vdso/Makefile
new file mode 100644
index 000000000000..2fa12d4ac106
--- /dev/null
+++ b/arch/s390/kernel/vdso/Makefile
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+# List of files in the vdso
+
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
+obj-vdso = vdso_user_wrapper.o note.o vgetrandom-chacha.o
+obj-cvdso = vdso_generic.o getcpu.o vgetrandom.o
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
+CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vdso_generic.o = $(VDSO_CFLAGS_REMOVE)
+
+ifneq ($(c-getrandom-y),)
+ CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
+endif
+
+# Build rules
+
+targets := $(obj-vdso) $(obj-cvdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+obj-cvdso := $(addprefix $(obj)/, $(obj-cvdso))
+
+KBUILD_AFLAGS_VDSO := $(KBUILD_AFLAGS) -DBUILD_VDSO
+
+KBUILD_CFLAGS_VDSO := $(KBUILD_CFLAGS) -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_VDSO := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_VDSO))
+KBUILD_CFLAGS_VDSO := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_VDSO))
+KBUILD_CFLAGS_VDSO := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_VDSO))
+KBUILD_CFLAGS_VDSO := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_VDSO))
+KBUILD_CFLAGS_VDSO += -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
+KBUILD_CFLAGS_VDSO += -fno-stack-protector
+ldflags-y := -shared -soname=linux-vdso.so.1 \
+ --hash-style=both --build-id=sha1 -T
+
+$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_VDSO)
+$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_VDSO)
+
+obj-y += vdso_wrapper.o
+targets += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso_wrapper.o : $(obj)/vdso.so
+
+quiet_cmd_vdso_and_check = VDSO $@
+ cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
+
+# link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) $(obj-cvdso) FORCE
+ $(call if_changed,vdso_and_check)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# assembly rules for the .S files
+$(obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+$(obj-cvdso): %.o: %.c FORCE
+ $(call if_changed_dep,vdsocc)
+
+# actual build commands
+quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdsocc = VDSOC $@
+ cmd_vdsocc = $(CC) $(c_flags) -c -o $@ $<
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
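
After a build, the offsets header is generated from the unstripped vdso.so.dbg via the vdsosym rule above; the same nm output can be inspected by hand to see which __kernel_ entry points the DSO provides (path relative to the object tree, shown here as a hypothetical spot check):

    nm arch/s390/kernel/vdso/vdso.so.dbg | grep ' __kernel_'
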
diff --git a/arch/s390/kernel/vdso64/gen_vdso_offsets.sh b/arch/s390/kernel/vdso/gen_vdso_offsets.sh
index 37f05cb38dad..359982fb002d 100755
--- a/arch/s390/kernel/vdso64/gen_vdso_offsets.sh
+++ b/arch/s390/kernel/vdso/gen_vdso_offsets.sh
@@ -12,4 +12,4 @@
#
LC_ALL=C
-sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p'
+sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso_offset_\2\t0x\1/p'
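
The only functional change to the helper is the macro prefix: generated defines are now named vdso_offset_* instead of vdso64_offset_*. With a made-up symbol address, a matching nm line is rewritten like this:

    echo '00000000000007a0 T __kernel_getcpu' |
    sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso_offset_\2\t0x\1/p'

which emits: #define vdso_offset_getcpu 0x00000000000007a0
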
diff --git a/arch/s390/kernel/vdso64/getcpu.c b/arch/s390/kernel/vdso/getcpu.c
index 5c5d4a848b76..5c5d4a848b76 100644
--- a/arch/s390/kernel/vdso64/getcpu.c
+++ b/arch/s390/kernel/vdso/getcpu.c
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso/note.S
index db19d0680a0a..db19d0680a0a 100644
--- a/arch/s390/kernel/vdso32/note.S
+++ b/arch/s390/kernel/vdso/note.S
diff --git a/arch/s390/kernel/vdso64/vdso.h b/arch/s390/kernel/vdso/vdso.h
index 9e5397e7b590..8cff033dd854 100644
--- a/arch/s390/kernel/vdso64/vdso.h
+++ b/arch/s390/kernel/vdso/vdso.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_S390_KERNEL_VDSO64_VDSO_H
-#define __ARCH_S390_KERNEL_VDSO64_VDSO_H
+#ifndef __ARCH_S390_KERNEL_VDSO_VDSO_H
+#define __ARCH_S390_KERNEL_VDSO_VDSO_H
#include <vdso/datapage.h>
@@ -12,4 +12,4 @@ int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts);
ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
-#endif /* __ARCH_S390_KERNEL_VDSO64_VDSO_H */
+#endif /* __ARCH_S390_KERNEL_VDSO_VDSO_H */
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso/vdso.lds.S
index e4f6551ae898..7bec4de0e8e0 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso/vdso.lds.S
@@ -7,6 +7,7 @@
#include <asm/vdso/vsyscall.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <asm-generic/vmlinux.lds.h>
#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
@@ -59,47 +60,9 @@ SECTIONS
_end = .;
PROVIDE(end = .);
- /*
- * Stabs debugging sections are here too.
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
+ STABS_DEBUG
+ DWARF_DEBUG
.comment 0 : { *(.comment) }
-
- /*
- * DWARF debug sections.
- * Symbols in the DWARF debugging sections are relative to the
- * beginning of the section so we begin them at 0.
- */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
- /* DWARF 3 */
- .debug_pubtypes 0 : { *(.debug_pubtypes) }
- .debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
diff --git a/arch/s390/kernel/vdso64/vdso64_generic.c b/arch/s390/kernel/vdso/vdso_generic.c
index a9aa75643c08..a9aa75643c08 100644
--- a/arch/s390/kernel/vdso64/vdso64_generic.c
+++ b/arch/s390/kernel/vdso/vdso_generic.c
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso/vdso_user_wrapper.S
index aa06c85bcbd3..aa06c85bcbd3 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso/vdso_user_wrapper.S
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso/vdso_wrapper.S
index de2fb930471a..f69e62a14978 100644
--- a/arch/s390/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/s390/kernel/vdso/vdso_wrapper.S
@@ -5,11 +5,11 @@
__PAGE_ALIGNED_DATA
- .globl vdso32_start, vdso32_end
+ .globl vdso_start, vdso_end
.balign PAGE_SIZE
-vdso32_start:
- .incbin "arch/s390/kernel/vdso32/vdso32.so"
+vdso_start:
+ .incbin "arch/s390/kernel/vdso/vdso.so"
.balign PAGE_SIZE
-vdso32_end:
+vdso_end:
.previous
diff --git a/arch/s390/kernel/vdso64/vgetrandom-chacha.S b/arch/s390/kernel/vdso/vgetrandom-chacha.S
index 09c034c2f853..09c034c2f853 100644
--- a/arch/s390/kernel/vdso64/vgetrandom-chacha.S
+++ b/arch/s390/kernel/vdso/vgetrandom-chacha.S
diff --git a/arch/s390/kernel/vdso64/vgetrandom.c b/arch/s390/kernel/vdso/vgetrandom.c
index b5268b507fb5..b5268b507fb5 100644
--- a/arch/s390/kernel/vdso64/vgetrandom.c
+++ b/arch/s390/kernel/vdso/vgetrandom.c
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
deleted file mode 100644
index 1e4ddd1a683f..000000000000
--- a/arch/s390/kernel/vdso32/Makefile
+++ /dev/null
@@ -1,64 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# List of files in the vdso
-
-# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile.include
-obj-vdso32 = vdso_user_wrapper-32.o note-32.o
-
-# Build rules
-
-targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-
-KBUILD_AFLAGS += -DBUILD_VDSO
-KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-KBUILD_AFLAGS_32 += -m31 -s
-
-KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
-
-LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
- --hash-style=both --build-id=sha1 -melf_s390 -T
-
-$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
-$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-
-obj-y += vdso32_wrapper.o
-targets += vdso32.lds
-CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
-
-quiet_cmd_vdso_and_check = VDSO $@
- cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
-
-$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) FORCE
- $(call if_changed,vdso_and_check)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-$(obj-vdso32): %-32.o: %.S FORCE
- $(call if_changed_dep,vdso32as)
-
-# actual build commands
-quiet_cmd_vdso32as = VDSO32A $@
- cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
-quiet_cmd_vdso32cc = VDSO32C $@
- cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
-
-# Generate VDSO offsets using helper script
-gen-vdsosym := $(src)/gen_vdso_offsets.sh
-quiet_cmd_vdsosym = VDSOSYM $@
- cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-
-include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
- $(call if_changed,vdsosym)
diff --git a/arch/s390/kernel/vdso32/gen_vdso_offsets.sh b/arch/s390/kernel/vdso32/gen_vdso_offsets.sh
deleted file mode 100755
index 9c4f951e227d..000000000000
--- a/arch/s390/kernel/vdso32/gen_vdso_offsets.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-#
-# Match symbols in the DSO that look like VDSO_*; produce a header file
-# of constant offsets into the shared object.
-#
-# Doing this inside the Makefile will break the $(filter-out) function,
-# causing Kbuild to rebuild the vdso-offsets header file every time.
-#
-# Inspired by arm64 version.
-#
-
-LC_ALL=C
-sed -n 's/\([0-9a-f]*\) . __kernel_compat_\(.*\)/\#define vdso32_offset_\2\t0x\1/p'
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
deleted file mode 100644
index 9630d58c2080..000000000000
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the infamous ld script for the 64 bits vdso
- * library
- */
-
-#include <asm/page.h>
-#include <asm/vdso.h>
-#include <vdso/datapage.h>
-
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-
-SECTIONS
-{
- VDSO_VVAR_SYMS
-
- . = SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
-
- . = ALIGN(16);
- .text : {
- *(.text .stub .text.* .gnu.linkonce.t.*)
- } :text
- PROVIDE(__etext = .);
- PROVIDE(_etext = .);
- PROVIDE(etext = .);
-
- /*
- * Other stuff is appended to the text segment:
- */
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
- .rodata1 : { *(.rodata1) }
-
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
- .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
-
- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
- .got ALIGN(8) : { *(.got .toc) }
- .got.plt ALIGN(8) : { *(.got.plt) }
-
- _end = .;
- PROVIDE(end = .);
-
- /*
- * Stabs debugging sections are here too.
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-
- /*
- * DWARF debug sections.
- * Symbols in the DWARF debugging sections are relative to the
- * beginning of the section so we begin them at 0.
- */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
- /* DWARF 3 */
- .debug_pubtypes 0 : { *(.debug_pubtypes) }
- .debug_ranges 0 : { *(.debug_ranges) }
- .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
-
- /DISCARD/ : {
- *(.note.GNU-stack)
- *(.branch_lt)
- *(.data .data.* .gnu.linkonce.d.* .sdata*)
- *(.bss .sbss .dynbss .dynsbss)
- }
-}
-
-/*
- * Very old versions of ld do not recognize this name token; use the constant.
- */
-#define PT_GNU_EH_FRAME 0x6474e550
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
- text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-/*
- * This controls what symbols we export from the DSO.
- */
-VERSION
-{
- VDSO_VERSION_STRING {
- global:
- /*
- * Has to be there for the kernel to find
- */
- __kernel_compat_restart_syscall;
- __kernel_compat_rt_sigreturn;
- __kernel_compat_sigreturn;
- local: *;
- };
-}
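The PHDRS block above deliberately produces a single read-only, executable PT_LOAD segment, and the VERSION block restricts the exported symbols to the three restart/sigreturn stubs. Userspace never opens this DSO from disk; the kernel maps it into each process and advertises the base address through the auxiliary vector. A minimal userspace sketch of locating the mapped image (64-bit types shown; a compat task would use the Elf32 variants):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR points at the ELF header of the mapped vDSO */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);
	const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)base;

	if (!base)
		return 1;
	printf("vDSO mapped at %#lx, %u program headers\n",
	       base, (unsigned int)ehdr->e_phnum);
	return 0;
}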
diff --git a/arch/s390/kernel/vdso32/vdso_user_wrapper.S b/arch/s390/kernel/vdso32/vdso_user_wrapper.S
deleted file mode 100644
index 2e645003fdaf..000000000000
--- a/arch/s390/kernel/vdso32/vdso_user_wrapper.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-#include <asm/dwarf.h>
-
-.macro vdso_syscall func,syscall
- .globl __kernel_compat_\func
- .type __kernel_compat_\func,@function
- __ALIGN
-__kernel_compat_\func:
- CFI_STARTPROC
- svc \syscall
- /* Make sure we notice when a syscall returns, which shouldn't happen */
- .word 0
- CFI_ENDPROC
- .size __kernel_compat_\func,.-__kernel_compat_\func
-.endm
-
-vdso_syscall restart_syscall,__NR_restart_syscall
-vdso_syscall sigreturn,__NR_sigreturn
-vdso_syscall rt_sigreturn,__NR_rt_sigreturn
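Each instantiation of the macro above emits a small __kernel_compat_* stub that issues "svc <nr>" and then hits the illegal ".word 0" if the syscall ever returns. These stubs exist so the signal code can use them as restorers. A hedged sketch of that wiring; write_return_address() and both parameters are illustrative stand-ins, not the actual s390 signal code:

struct pt_regs;

/* hypothetical helper: writes the handler's return address into the signal frame */
extern void write_return_address(struct pt_regs *regs, unsigned long addr);

static void set_compat_restorer(struct pt_regs *regs,
				unsigned long vdso_base,
				unsigned long sigreturn_offset)
{
	/* returning from the signal handler lands on "svc __NR_sigreturn" */
	write_return_address(regs, vdso_base + sigreturn_offset);
}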
diff --git a/arch/s390/kernel/vdso64/.gitignore b/arch/s390/kernel/vdso64/.gitignore
deleted file mode 100644
index 4ec80685fecc..000000000000
--- a/arch/s390/kernel/vdso64/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-vdso64.lds
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
deleted file mode 100644
index d8f0df742809..000000000000
--- a/arch/s390/kernel/vdso64/Makefile
+++ /dev/null
@@ -1,79 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# List of files in the vdso
-
-# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile.include
-obj-vdso64 = vdso_user_wrapper.o note.o vgetrandom-chacha.o
-obj-cvdso64 = vdso64_generic.o getcpu.o vgetrandom.o
-VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
-CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
-CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE)
-CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
-
-ifneq ($(c-getrandom-y),)
- CFLAGS_vgetrandom.o += -include $(c-getrandom-y)
-endif
-
-# Build rules
-
-targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg
-obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
-obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64))
-
-KBUILD_AFLAGS += -DBUILD_VDSO
-KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
-
-KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
-KBUILD_AFLAGS_64 += -m64
-
-KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
-ldflags-y := -shared -soname=linux-vdso64.so.1 \
- --hash-style=both --build-id=sha1 -T
-
-$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
-$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
-
-obj-y += vdso64_wrapper.o
-targets += vdso64.lds
-CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
-
-quiet_cmd_vdso_and_check = VDSO $@
- cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check)
-
-# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE
- $(call if_changed,vdso_and_check)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S FORCE
- $(call if_changed_dep,vdso64as)
-
-$(obj-cvdso64): %.o: %.c FORCE
- $(call if_changed_dep,vdso64cc)
-
-# actual build commands
-quiet_cmd_vdso64as = VDSO64A $@
- cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
-quiet_cmd_vdso64cc = VDSO64C $@
- cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
-
-# Generate VDSO offsets using helper script
-gen-vdsosym := $(src)/gen_vdso_offsets.sh
-quiet_cmd_vdsosym = VDSOSYM $@
- cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-
-include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE
- $(call if_changed,vdsosym)
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S
deleted file mode 100644
index db19d0680a0a..000000000000
--- a/arch/s390/kernel/vdso64/note.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
- * Here we can supply some information useful to userland.
- */
-
-#include <linux/uts.h>
-#include <linux/version.h>
-#include <linux/elfnote.h>
-
-ELFNOTE_START(Linux, 0, "a")
- .long LINUX_VERSION_CODE
-ELFNOTE_END
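The note embeds LINUX_VERSION_CODE, which packs the kernel version as (major << 16) | (minor << 8) | patch, with the patch level capped at 255 on recent kernels. A small standalone sketch decoding such a value:

#include <stdio.h>

/* decode a LINUX_VERSION_CODE value as found in the vDSO's .note section */
static void print_version(unsigned int code)
{
	printf("%u.%u.%u\n",
	       (code >> 16) & 0xff,	/* major */
	       (code >> 8) & 0xff,	/* minor */
	       code & 0xff);		/* patch level */
}

int main(void)
{
	print_version(0x060c00);	/* prints 6.12.0 */
	return 0;
}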
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S
deleted file mode 100644
index 672184998623..000000000000
--- a/arch/s390/kernel/vdso64/vdso64_wrapper.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso64_start, vdso64_end
- .balign PAGE_SIZE
-vdso64_start:
- .incbin "arch/s390/kernel/vdso64/vdso64.so"
- .balign PAGE_SIZE
-vdso64_end:
-
- .previous
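The wrapper above embeds the stripped vdso64.so via .incbin between the page-aligned vdso64_start and vdso64_end symbols, so the rest of the kernel can size the image directly from those bounds. A hedged sketch of that calculation, assuming the 4 KiB s390 page size; only the two symbols come from the file above:

extern char vdso64_start[], vdso64_end[];

#define SKETCH_PAGE_SIZE 4096UL		/* s390 uses 4 KiB pages */

static unsigned long vdso64_nr_pages(void)
{
	/* both symbols are page aligned, so this divides evenly */
	return (unsigned long)(vdso64_end - vdso64_start) / SKETCH_PAGE_SIZE;
}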
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index d74d4c52ccd0..53bcbb91bb9b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -51,7 +51,7 @@ SECTIONS
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
- *(.text.*_indirect_*)
+ *(.text..*_indirect_*)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */
@@ -150,6 +150,15 @@ SECTIONS
*(.altinstr_replacement)
}
+#ifdef CONFIG_STACKPROTECTOR
+ . = ALIGN(8);
+ .stack_prot_table : {
+ __stack_prot_start = .;
+ KEEP(*(__stack_protector_loc))
+ __stack_prot_end = .;
+ }
+#endif
+
/*
* Table with the patch locations to undo expolines
*/
@@ -257,6 +266,10 @@ SECTIONS
QUAD(invalid_pg_dir)
QUAD(__alt_instructions)
QUAD(__alt_instructions_end)
+#ifdef CONFIG_STACKPROTECTOR
+ QUAD(__stack_prot_start)
+ QUAD(__stack_prot_end)
+#endif
#ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte)
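The new .stack_prot_table output section collects every __stack_protector_loc entry emitted at build time between the __stack_prot_start and __stack_prot_end bounds, and the extra QUAD entries pass those bounds along next to the existing alternative-instruction ones. A hedged sketch of the usual pattern for walking such a section-bounded table; the entry type and the per-entry action are assumptions, only the two bounding symbols come from the linker script change above:

/* assumed: each 8-byte entry records the address of one patch location */
extern unsigned long __stack_prot_start[], __stack_prot_end[];

static void for_each_stack_prot_loc(void (*fn)(unsigned long addr))
{
	unsigned long *entry;

	for (entry = __stack_prot_start; entry < __stack_prot_end; entry++)
		fn(*entry);
}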