Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/include/asm/asm.h                     | 14
-rw-r--r--  arch/riscv/include/asm/cpufeature.h              |  2
-rw-r--r--  arch/riscv/include/asm/hwprobe.h                 |  7
-rw-r--r--  arch/riscv/include/asm/insn-def.h                |  8
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h              |  2
-rw-r--r--  arch/riscv/include/asm/pgtable.h                 |  2
-rw-r--r--  arch/riscv/include/asm/vdso/arch_data.h          |  6
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/mips.h  |  6
-rw-r--r--  arch/riscv/kernel/cpu.c                          |  4
-rw-r--r--  arch/riscv/kernel/cpufeature.c                   |  4
-rw-r--r--  arch/riscv/kernel/kgdb.c                         |  4
-rw-r--r--  arch/riscv/kernel/module-sections.c              |  8
-rw-r--r--  arch/riscv/kernel/smp.c                          | 24
-rw-r--r--  arch/riscv/kernel/stacktrace.c                   | 21
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c                  | 76
-rw-r--r--  arch/riscv/kernel/tests/Kconfig.debug            |  2
-rw-r--r--  arch/riscv/kernel/tests/kprobes/Makefile         |  4
-rw-r--r--  arch/riscv/kernel/tests/kprobes/test-kprobes.c   |  5
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c       |  9
-rw-r--r--  arch/riscv/kernel/vdso/hwprobe.c                 |  2
-rw-r--r--  arch/riscv/mm/ptdump.c                           |  2
21 files changed, 156 insertions(+), 56 deletions(-)
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 8bd2a11382a3..e9e8ba83e632 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -12,6 +12,12 @@
 #define __ASM_STR(x)	#x
 #endif
 
+#ifdef CONFIG_AS_HAS_INSN
+#define ASM_INSN_I(__x)	".insn " __x
+#else
+#define ASM_INSN_I(__x)	".4byte " __x
+#endif
+
 #if __riscv_xlen == 64
 #define __REG_SEL(a, b)	__ASM_STR(a)
 #elif __riscv_xlen == 32
@@ -84,15 +90,9 @@
 .endm
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
 .macro asm_per_cpu dst sym tmp
 	lw   \tmp, TASK_TI_CPU_NUM(tp)
-	slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+	slli \tmp, \tmp, RISCV_LGPTR
 	la   \dst, __per_cpu_offset
 	add  \dst, \dst, \tmp
 	REG_L \tmp, 0(\dst)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index fbd0e4306c93..62837fa981e8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -31,6 +31,8 @@ struct riscv_isainfo {
 
 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+extern const struct seq_operations cpuinfo_op;
+
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 948d2b34e94e..58f8dda73259 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
 	return pair->value == other_pair->value;
 }
 
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
 #endif
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index c9cfcea52cbb..d29da6ccd3dd 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -256,10 +256,10 @@
 	INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3),	\
 	       SIMM12((offset) & 0xfe0), RS1(base))
 
-#define RISCV_PAUSE	".4byte 0x100000f"
-#define ZAWRS_WRS_NTO	".4byte 0x00d00073"
-#define ZAWRS_WRS_STO	".4byte 0x01d00073"
-#define RISCV_NOP4	".4byte 0x00000013"
+#define RISCV_PAUSE	ASM_INSN_I("0x100000f")
+#define ZAWRS_WRS_NTO	ASM_INSN_I("0x00d00073")
+#define ZAWRS_WRS_STO	ASM_INSN_I("0x01d00073")
+#define RISCV_NOP4	ASM_INSN_I("0x00000013")
 
 #define RISCV_INSN_NOP4	_AC(0x00000013, U)
 
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 1018d2216901..6e789fa58514 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,8 @@ typedef struct {
 
 #define PTRS_PER_PMD	(PAGE_SIZE / sizeof(pmd_t))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
 /*
  * rv64 PTE format:
  * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 29e994a9afb6..5a08eb5fe99f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 	return __pgprot(prot);
 }
 
+#define pgprot_dmacoherent pgprot_writecombine
+
 /*
  * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
  * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
diff --git a/arch/riscv/include/asm/vdso/arch_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index da57a3786f7a..88b37af55175 100644
--- a/arch/riscv/include/asm/vdso/arch_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -12,6 +12,12 @@ struct vdso_arch_data {
 
 	/* Boolean indicating all CPUs have the same static hwprobe values. */
 	__u8 homogeneous_cpus;
+
+	/*
+	 * A gate to check and see if the hwprobe data is actually ready, as
+	 * probing is deferred to avoid boot slowdowns.
+	 */
+	__u8 ready;
 };
 
 #endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/include/asm/vendor_extensions/mips.h b/arch/riscv/include/asm/vendor_extensions/mips.h
index ea8ca747d691..ffeb12dc17a3 100644
--- a/arch/riscv/include/asm/vendor_extensions/mips.h
+++ b/arch/riscv/include/asm/vendor_extensions/mips.h
@@ -30,8 +30,8 @@ extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_mips;
 * allowing any subsequent instructions to fetch.
 */
 
-#define MIPS_PAUSE	".4byte 0x00501013\n\t"
-#define MIPS_EHB	".4byte 0x00301013\n\t"
-#define MIPS_IHB	".4byte 0x00101013\n\t"
+#define MIPS_PAUSE	ASM_INSN_I("0x00501013\n\t")
+#define MIPS_EHB	ASM_INSN_I("0x00301013\n\t")
+#define MIPS_IHB	ASM_INSN_I("0x00101013\n\t")
 
 #endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index f6b13e9f5e6c..3dbc8cc557dd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
 		return -ENODEV;
 	}
 
-	if (!of_device_is_available(node)) {
-		pr_info("CPU with hartid=%lu is not available\n", *hart);
+	if (!of_device_is_available(node))
 		return -ENODEV;
-	}
 
 	if (of_property_read_string(node, "riscv,isa-base", &isa))
 		goto old_interface;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 67b59699357d..72ca768f4e91 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
 {
 	int cpu;
 	u32 prev_vlenb = 0;
-	u32 vlenb;
+	u32 vlenb = 0;
 
-	/* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+	/* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
 	if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
 		return 0;
 
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
index 9f3db3503dab..15fec5d1e6de 100644
--- a/arch/riscv/kernel/kgdb.c
+++ b/arch/riscv/kernel/kgdb.c
@@ -265,10 +265,10 @@ void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
 {
 	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
 		     sizeof(gdb_xfer_read_target)))
-		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+		strscpy(remcom_out_buffer, riscv_gdb_stub_target_desc, BUFMAX);
 	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
 			  sizeof(gdb_xfer_read_cpuxml)))
-		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+		strscpy(remcom_out_buffer, riscv_gdb_stub_cpuxml, BUFMAX);
 }
 
 static inline void kgdb_arch_update_addr(struct pt_regs *regs,
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index 75551ac6504c..1675cbad8619 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -119,6 +119,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	unsigned int num_plts = 0;
 	unsigned int num_gots = 0;
 	Elf_Rela *scratch = NULL;
+	Elf_Rela *new_scratch;
 	size_t scratch_size = 0;
 	int i;
 
@@ -168,9 +169,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		scratch_size_needed = (num_scratch_relas + num_relas) * sizeof(*scratch);
 		if (scratch_size_needed > scratch_size) {
 			scratch_size = scratch_size_needed;
-			scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
-			if (!scratch)
+			new_scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL);
+			if (!new_scratch) {
+				kvfree(scratch);
 				return -ENOMEM;
+			}
+			scratch = new_scratch;
 		}
 
 		for (size_t j = 0; j < num_relas; j++)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index e650dec44817..5ed5095320e6 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -40,6 +40,17 @@
 	IPI_MAX
 };
 
+static const char * const ipi_names[] = {
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
+	[IPI_TIMER]		= "Timer broadcast interrupts",
+	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
+	[IPI_KGDB_ROUNDUP]	= "KGDB roundup interrupts",
+};
+
 unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
 	[0 ... NR_CPUS-1] = INVALID_HARTID
 };
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
 	/* Request IPIs */
 	for (i = 0; i < nr_ipi; i++) {
 		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
-					 "IPI", &ipi_dummy_dev);
+					 ipi_names[i], &ipi_dummy_dev);
 		WARN_ON(err);
 
 		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
 	riscv_ipi_enable();
 }
 
-static const char * const ipi_names[] = {
-	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
-	[IPI_CALL_FUNC]		= "Function call interrupts",
-	[IPI_CPU_STOP]		= "CPU stop interrupts",
-	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
-	[IPI_IRQ_WORK]		= "IRQ work interrupts",
-	[IPI_TIMER]		= "Timer broadcast interrupts",
-	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
-	[IPI_KGDB_ROUNDUP]	= "KGDB roundup interrupts",
-};
-
 void show_ipi_stats(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 3fe9e6edef8f..b41b6255751c 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -16,6 +16,22 @@
 
 #ifdef CONFIG_FRAME_POINTER
 
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)			\
+({							\
+	unsigned long val;				\
+	unsigned long addr = x;				\
+	if ((task) == current)				\
+		val = READ_ONCE(addr);			\
+	else						\
+		val = READ_ONCE_NOCHECK(addr);		\
+	val;						\
+})
+
 extern asmlinkage void handle_exception(void);
 extern unsigned long ret_from_exception_end;
 
@@ -69,8 +85,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 			fp = frame->ra;
 			pc = regs->ra;
 		} else {
-			fp = frame->fp;
-			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+			fp = READ_ONCE_TASK_STACK(task, frame->fp);
+			pc = READ_ONCE_TASK_STACK(task, frame->ra);
+			pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
 						   &frame->ra);
 			if (pc >= (unsigned long)handle_exception &&
 			    pc < (unsigned long)&ret_from_exception_end) {
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 000f4451a9d8..199d13f86f31 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -5,6 +5,9 @@
  * more details.
  */
 #include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
 	bool first = true;
 	int cpu;
 
+	if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+	    pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+	    pair->key != RISCV_HWPROBE_KEY_MARCHID)
+		goto out;
+
 	for_each_cpu(cpu, cpus) {
 		u64 cpu_id;
 
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
 		}
 	}
 
+out:
 	pair->value = id;
 }
 
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
 	return 0;
 }
 
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-			    size_t pair_count, size_t cpusetsize,
-			    unsigned long __user *cpus_user,
-			    unsigned int flags)
-{
-	if (flags & RISCV_HWPROBE_WHICH_CPUS)
-		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
-					cpus_user, flags);
+#ifdef CONFIG_MMU
 
-	return hwprobe_get_values(pairs, pair_count, cpusetsize,
-				  cpus_user, flags);
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+	atomic_inc(&pending_boot_probes);
 }
 
-#ifdef CONFIG_MMU
+void riscv_hwprobe_complete_async_probe(void)
+{
+	if (atomic_dec_and_test(&pending_boot_probes))
+		complete(&boot_probes_done);
+}
 
-static int __init init_hwprobe_vdso_data(void)
+static int complete_hwprobe_vdso_data(void)
 {
 	struct vdso_arch_data *avd = vdso_k_arch_data;
 	u64 id_bitsmash = 0;
 	struct riscv_hwprobe pair;
 	int key;
 
+	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+		wait_for_completion(&boot_probes_done);
+
 	/*
 	 * Initialize vDSO data with the answers for the "all CPUs" case, to
 	 * save a syscall in the common case.
@@ -503,13 +516,52 @@
 	 * vDSO should defer to the kernel for exotic cpu masks.
 	 */
 	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+	/*
+	 * Make sure all the VDSO values are visible before we look at them.
+	 * This pairs with the implicit "no speculativly visible accesses"
+	 * barrier in the VDSO hwprobe code.
+	 */
+	smp_wmb();
+	avd->ready = true;
+	return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+	struct vdso_arch_data *avd = vdso_k_arch_data;
+
+	/*
+	 * Prevent the vDSO cached values from being used, as they're not ready
+	 * yet.
+	 */
+	avd->ready = false;
 	return 0;
 }
 
 arch_initcall_sync(init_hwprobe_vdso_data);
 
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
 #endif /* CONFIG_MMU */
 
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+			    size_t pair_count, size_t cpusetsize,
+			    unsigned long __user *cpus_user,
+			    unsigned int flags)
+{
+	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+	if (flags & RISCV_HWPROBE_WHICH_CPUS)
+		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+					cpus_user, flags);
+
+	return hwprobe_get_values(pairs, pair_count, cpusetsize,
+				  cpus_user, flags);
+}
+
 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
 		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
 		cpus, unsigned int, flags)
diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug
index 5db4df44279e..40f8dafffa0a 100644
--- a/arch/riscv/kernel/tests/Kconfig.debug
+++ b/arch/riscv/kernel/tests/Kconfig.debug
@@ -31,7 +31,7 @@ config RISCV_MODULE_LINKING_KUNIT
 	  If unsure, say N.
 
 config RISCV_KPROBES_KUNIT
-	bool "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
+	tristate "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS
 	depends on KUNIT
 	depends on KPROBES
 	default KUNIT_ALL_TESTS
diff --git a/arch/riscv/kernel/tests/kprobes/Makefile b/arch/riscv/kernel/tests/kprobes/Makefile
index 4cb6c66a98e8..df7256f62313 100644
--- a/arch/riscv/kernel/tests/kprobes/Makefile
+++ b/arch/riscv/kernel/tests/kprobes/Makefile
@@ -1 +1,3 @@
-obj-y += test-kprobes.o test-kprobes-asm.o
+obj-$(CONFIG_RISCV_KPROBES_KUNIT) += kprobes_riscv_kunit.o
+
+kprobes_riscv_kunit-objs := test-kprobes.o test-kprobes-asm.o
diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes.c b/arch/riscv/kernel/tests/kprobes/test-kprobes.c
index 6f6cdfbf5a95..664535ca0a98 100644
--- a/arch/riscv/kernel/tests/kprobes/test-kprobes.c
+++ b/arch/riscv/kernel/tests/kprobes/test-kprobes.c
@@ -49,8 +49,11 @@ static struct kunit_case kprobes_testcases[] = {
 };
 
 static struct kunit_suite kprobes_test_suite = {
-	.name = "kprobes_test_riscv",
+	.name = "kprobes_riscv",
 	.test_cases = kprobes_testcases,
 };
 
 kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for riscv kprobes");
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index ae2068425fbc..70b5e6927620 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -379,6 +379,7 @@ free:
 static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
 {
 	schedule_on_each_cpu(check_vector_unaligned_access);
+	riscv_hwprobe_complete_async_probe();
 
 	return 0;
 }
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
 			per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
 	} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
 		   IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
-		kthread_run(vec_check_unaligned_access_speed_all_cpus,
-			    NULL, "vec_check_unaligned_access_speed_all_cpus");
+		riscv_hwprobe_register_async_probe();
+		if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+				       NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+			pr_warn("Failed to create vec_unalign_check kthread\n");
+			riscv_hwprobe_complete_async_probe();
+		}
 	}
 
 	/*
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index 2ddeba6c68dd..8f45500d0a6e 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
 	 * homogeneous, then this function can handle requests for arbitrary
 	 * masks.
	 */
-	if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+	if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
 		return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
 
 	/* This is something we can handle, fill out the pairs. */
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 3b51690cc876..34299c2b231f 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -21,7 +21,7 @@
 #define pt_dump_seq_puts(m, fmt)	\
 ({					\
 	if (m)				\
-		seq_printf(m, fmt);	\
+		seq_puts(m, fmt);	\
 })
 
 /*
