Diffstat (limited to 'arch/riscv')
83 files changed, 1081 insertions, 499 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 51dcd8eaa243..28d00744cbb5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -28,6 +28,7 @@ config RISCV select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX + select ARCH_HAS_ELF_CORE_EFLAGS select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL @@ -53,6 +54,7 @@ config RISCV select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN select ARCH_HAS_VDSO_ARCH_DATA if GENERIC_VDSO_DATA_STORE + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK if ACPI select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if 64BIT && MMU select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX @@ -60,7 +62,7 @@ config RISCV select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW # clang >= 17: https://github.com/llvm/llvm-project/commit/62fa708ceb027713b386c7e0efda994f8bdc27e2 - select ARCH_SUPPORTS_CFI_CLANG if CLANG_VERSION >= 170000 + select ARCH_SUPPORTS_CFI if (!CC_IS_CLANG || CLANG_VERSION >= 170000) select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGETLBFS if MMU @@ -76,7 +78,7 @@ config RISCV select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_SYM_ANNOTATIONS - select ARCH_USES_CFI_TRAPS if CFI_CLANG + select ARCH_USES_CFI_TRAPS if CFI select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS @@ -154,7 +156,7 @@ config RISCV select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE) select FUNCTION_ALIGNMENT_4B if HAVE_DYNAMIC_FTRACE && RISCV_ISA_C select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS if HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS - select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG) + select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if (DYNAMIC_FTRACE_WITH_ARGS && !CFI) select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_GRAPH_FUNC select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata index e318119d570d..aca9b0cfcfec 100644 --- a/arch/riscv/Kconfig.errata +++ b/arch/riscv/Kconfig.errata @@ -21,6 +21,29 @@ config ERRATA_ANDES_CMO If you don't know what to do here, say "Y". +config ERRATA_MIPS + bool "MIPS errata" + depends on RISCV_ALTERNATIVE + help + All MIPS errata Kconfig depend on this Kconfig. Disabling + this Kconfig will disable all MIPS errata. Please say "Y" + here if your platform uses MIPS CPU cores. + + Otherwise, please say "N" here to avoid unnecessary overhead. + +config ERRATA_MIPS_P8700_PAUSE_OPCODE + bool "Fix the PAUSE Opcode for MIPS P8700" + depends on ERRATA_MIPS && 64BIT + default n + help + The RISCV MIPS P8700 uses a different opcode for PAUSE. + It is a 'hint' encoding of the SLLI instruction, + with rd=0, rs1=0 and imm=5. It will behave as a NOP + instruction if no additional behavior beyond that of + SLLI is implemented. + + If you are not using the P8700 processor, say n. + config ERRATA_SIFIVE bool "SiFive errata" depends on RISCV_ALTERNATIVE diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor index e14f26368963..3c1f92e406c3 100644 --- a/arch/riscv/Kconfig.vendor +++ b/arch/riscv/Kconfig.vendor @@ -16,6 +16,19 @@ config RISCV_ISA_VENDOR_EXT_ANDES If you don't know what to do here, say Y. 
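As a quick illustrative check of the ERRATA_MIPS_P8700_PAUSE_OPCODE help text above (not part of the patch), the replacement pause opcode can be rebuilt from the standard I-type SLLI fields with rd=0, rs1=0 and imm=5; the result matches the MIPS_PAUSE constant (0x00501013) that this series adds in asm/vendor_extensions/mips.h, and imm=3 / imm=1 give the MIPS_EHB / MIPS_IHB encodings defined alongside it. A minimal userspace sketch:

	/* Sketch only: rebuild the MIPS P8700 hint encodings from SLLI fields. */
	#include <stdint.h>
	#include <stdio.h>

	#define OPCODE_OP_IMM	0x13u	/* major opcode shared by SLLI */
	#define FUNCT3_SLLI	0x1u

	static uint32_t slli_hint(uint32_t rd, uint32_t rs1, uint32_t imm)
	{
		/* imm[11:0] | rs1 | funct3 | rd | opcode, per the base I-type layout */
		return (imm << 20) | (rs1 << 15) | (FUNCT3_SLLI << 12) |
		       (rd << 7) | OPCODE_OP_IMM;
	}

	int main(void)
	{
		printf("MIPS.PAUSE 0x%08x\n", slli_hint(0, 0, 5));	/* 0x00501013 */
		printf("MIPS.EHB   0x%08x\n", slli_hint(0, 0, 3));	/* 0x00301013 */
		printf("MIPS.IHB   0x%08x\n", slli_hint(0, 0, 1));	/* 0x00101013 */
		return 0;
	}

A core that does not implement the hint simply executes it as slli x0, x0, imm, i.e. a NOP, which is why the help text above calls out the SLLI fallback behaviour.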
endmenu +menu "MIPS" +config RISCV_ISA_VENDOR_EXT_MIPS + bool "MIPS vendor extension support" + select RISCV_ISA_VENDOR_EXT + default y + help + Say N here to disable detection of and support for all MIPS vendor + extensions. Without this option enabled, MIPS vendor extensions will + not be detected at boot and their presence not reported to userspace. + + If you don't know what to do here, say Y. +endmenu + menu "SiFive" config RISCV_ISA_VENDOR_EXT_SIFIVE bool "SiFive vendor extension support" diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index bc6c77ba837d..02a7a3335b1d 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -13,5 +13,6 @@ endif endif obj-$(CONFIG_ERRATA_ANDES) += andes/ +obj-$(CONFIG_ERRATA_MIPS) += mips/ obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ obj-$(CONFIG_ERRATA_THEAD) += thead/ diff --git a/arch/riscv/errata/mips/Makefile b/arch/riscv/errata/mips/Makefile new file mode 100644 index 000000000000..6278c389b801 --- /dev/null +++ b/arch/riscv/errata/mips/Makefile @@ -0,0 +1,5 @@ +ifdef CONFIG_RISCV_ALTERNATIVE_EARLY +CFLAGS_errata.o := -mcmodel=medany +endif + +obj-y += errata.o diff --git a/arch/riscv/errata/mips/errata.c b/arch/riscv/errata/mips/errata.c new file mode 100644 index 000000000000..e984a8152208 --- /dev/null +++ b/arch/riscv/errata/mips/errata.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 MIPS. + */ + +#include <linux/memory.h> +#include <linux/module.h> +#include <asm/text-patching.h> +#include <asm/alternative.h> +#include <asm/errata_list.h> +#include <asm/vendorid_list.h> +#include <asm/vendor_extensions.h> +#include <asm/vendor_extensions/mips.h> + +static inline bool errata_probe_pause(void) +{ + if (!IS_ENABLED(CONFIG_ERRATA_MIPS_P8700_PAUSE_OPCODE)) + return false; + + if (!riscv_isa_vendor_extension_available(MIPS_VENDOR_ID, XMIPSEXECTL)) + return false; + + return true; +} + +static u32 mips_errata_probe(void) +{ + u32 cpu_req_errata = 0; + + if (errata_probe_pause()) + cpu_req_errata |= BIT(ERRATA_MIPS_P8700_PAUSE_OPCODE); + + return cpu_req_errata; +} + +void mips_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid, + unsigned int stage) +{ + struct alt_entry *alt; + u32 cpu_req_errata = mips_errata_probe(); + u32 tmp; + + BUILD_BUG_ON(ERRATA_MIPS_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != MIPS_VENDOR_ID) + continue; + + if (alt->patch_id >= ERRATA_MIPS_NUMBER) { + WARN(1, "MIPS errata id:%d not in kernel errata list\n", + alt->patch_id); + continue; + } + + tmp = (1U << alt->patch_id); + if (cpu_req_errata && tmp) { + mutex_lock(&text_mutex); + patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt), + alt->alt_len); + mutex_unlock(&text_mutex); + } + } +} diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 231d777d936c..9619bd5c8eba 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -4,7 +4,7 @@ #ifdef CONFIG_RISCV_ALTERNATIVE -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len .4byte \oldptr - . @@ -53,7 +53,7 @@ #define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__ #define __ALTERNATIVE_CFG_2(...) 
ALTERNATIVE_CFG_2 __VA_ARGS__ -#else /* !__ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ #include <asm/asm.h> #include <linux/stringify.h> @@ -98,7 +98,7 @@ __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \ ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k)) @@ -109,7 +109,7 @@ new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2)) #else /* CONFIG_RISCV_ALTERNATIVE */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .macro ALTERNATIVE_CFG old_c \old_c @@ -118,12 +118,12 @@ #define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c #define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c -#else /* !__ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ #define __ALTERNATIVE_CFG(old_c, ...) old_c "\n" #define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n" -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c) #define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c) diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h index 3c2b59b25017..8407d1d535b8 100644 --- a/arch/riscv/include/asm/alternative.h +++ b/arch/riscv/include/asm/alternative.h @@ -8,7 +8,7 @@ #include <asm/alternative-macros.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_RISCV_ALTERNATIVE @@ -48,6 +48,9 @@ struct alt_entry { void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage); +void mips_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid, + unsigned int stage); void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage); diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h index 0c8bfd54fc4e..37d425d7a762 100644 --- a/arch/riscv/include/asm/asm-extable.h +++ b/arch/riscv/include/asm/asm-extable.h @@ -10,7 +10,7 @@ #ifdef CONFIG_MMU -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ .pushsection __ex_table, "a"; \ @@ -25,7 +25,7 @@ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) .endm -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #include <linux/bits.h> #include <linux/stringify.h> @@ -77,7 +77,7 @@ EX_DATA_REG(ADDR, addr) \ ")") -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #else /* CONFIG_MMU */ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 2a16e88e13de..8bd2a11382a3 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -6,7 +6,7 @@ #ifndef _ASM_RISCV_ASM_H #define _ASM_RISCV_ASM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __ASM_STR(x) x #else #define __ASM_STR(x) #x @@ -30,7 +30,7 @@ #define SRLI __REG_SEL(srliw, srli) #if __SIZEOF_POINTER__ == 8 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define RISCV_PTR .dword #define RISCV_SZPTR 8 #define RISCV_LGPTR 3 @@ -40,7 +40,7 @@ #define RISCV_LGPTR "3" #endif #elif __SIZEOF_POINTER__ == 4 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define RISCV_PTR .word #define RISCV_SZPTR 4 #define RISCV_LGPTR 2 @@ -69,7 +69,7 @@ #error "Unexpected __SIZEOF_SHORT__" #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include 
<asm/asm-offsets.h> /* Common assembly source macros */ @@ -194,6 +194,6 @@ #define ASM_NOKPROBE(name) #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_ASM_H */ diff --git a/arch/riscv/include/asm/assembler.h b/arch/riscv/include/asm/assembler.h index 44b1457d3e95..16931712beab 100644 --- a/arch/riscv/include/asm/assembler.h +++ b/arch/riscv/include/asm/assembler.h @@ -5,7 +5,7 @@ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com> */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #error "Only include this from assembly code" #endif diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index b8c5726d86ac..700ba3f922cb 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -10,7 +10,7 @@ #ifndef _ASM_RISCV_BARRIER_H #define _ASM_RISCV_BARRIER_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/cmpxchg.h> #include <asm/fence.h> @@ -82,6 +82,6 @@ do { \ #include <asm-generic/barrier.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_BARRIER_H */ diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index d59310f74c2b..77880677b06e 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -45,7 +45,7 @@ #error "Unexpected BITS_PER_LONG" #endif -static __always_inline unsigned long variable__ffs(unsigned long word) +static __always_inline __attribute_const__ unsigned long variable__ffs(unsigned long word) { asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, RISCV_ISA_EXT_ZBB, 1) @@ -74,7 +74,7 @@ legacy: (unsigned long)__builtin_ctzl(word) : \ variable__ffs(word)) -static __always_inline unsigned long variable__fls(unsigned long word) +static __always_inline __attribute_const__ unsigned long variable__fls(unsigned long word) { asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, RISCV_ISA_EXT_ZBB, 1) @@ -103,7 +103,7 @@ legacy: (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \ variable__fls(word)) -static __always_inline int variable_ffs(int x) +static __always_inline __attribute_const__ int variable_ffs(int x) { asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, RISCV_ISA_EXT_ZBB, 1) diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h index 570e9d8acad1..eb42b739d132 100644 --- a/arch/riscv/include/asm/cache.h +++ b/arch/riscv/include/asm/cache.h @@ -24,7 +24,7 @@ #define ARCH_SLAB_MINALIGN 16 #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern int dma_cache_alignment; #ifdef CONFIG_RISCV_DMA_NONCOHERENT @@ -35,6 +35,6 @@ static inline int dma_get_cache_alignment(void) } #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_CACHE_H */ diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h index 4508aaa7a2fd..710aa8192edd 100644 --- a/arch/riscv/include/asm/cfi.h +++ b/arch/riscv/include/asm/cfi.h @@ -11,7 +11,7 @@ struct pt_regs; -#ifdef CONFIG_CFI_CLANG +#ifdef CONFIG_CFI enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); #define __bpfcall #else @@ -19,6 +19,6 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { return BUG_TRAP_TYPE_NONE; } -#endif /* CONFIG_CFI_CLANG */ +#endif /* CONFIG_CFI */ #endif /* _ASM_RISCV_CFI_H */ diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 0b749e710216..80bd52363c68 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -14,6 +14,7 @@ #include <asm/insn-def.h> #include <asm/cpufeature-macros.h> 
#include <asm/processor.h> +#include <asm/errata_list.h> #define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ swap_append, r, p, n) \ @@ -438,7 +439,7 @@ static __always_inline void __cmpwait(volatile void *ptr, return; no_zawrs: - asm volatile(RISCV_PAUSE : : : "memory"); + ALT_RISCV_PAUSE(); } #define __cmpwait_relaxed(ptr, val) \ diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h index d6e4665b3195..776fa55fbaa4 100644 --- a/arch/riscv/include/asm/cpu_ops_sbi.h +++ b/arch/riscv/include/asm/cpu_ops_sbi.h @@ -5,7 +5,7 @@ #ifndef __ASM_CPU_OPS_SBI_H #define __ASM_CPU_OPS_SBI_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/init.h> #include <linux/sched.h> #include <linux/threads.h> diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 6fed42e37705..4a37a98398ad 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -513,7 +513,7 @@ #define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) #define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define csr_swap(csr, val) \ ({ \ @@ -575,6 +575,6 @@ : "memory"); \ }) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_CSR_H */ diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h index 21774d868c65..ba5aa72aff63 100644 --- a/arch/riscv/include/asm/current.h +++ b/arch/riscv/include/asm/current.h @@ -13,7 +13,7 @@ #include <linux/bug.h> #include <linux/compiler.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; @@ -35,6 +35,6 @@ static __always_inline struct task_struct *get_current(void) register unsigned long current_stack_pointer __asm__("sp"); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_CURRENT_H */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 6e426ed7919a..6694b5ccdcf8 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -5,31 +5,14 @@ #ifndef ASM_ERRATA_LIST_H #define ASM_ERRATA_LIST_H -#include <asm/alternative.h> #include <asm/csr.h> #include <asm/insn-def.h> #include <asm/hwcap.h> #include <asm/vendorid_list.h> +#include <asm/errata_list_vendors.h> +#include <asm/vendor_extensions/mips.h> -#ifdef CONFIG_ERRATA_ANDES -#define ERRATA_ANDES_NO_IOCP 0 -#define ERRATA_ANDES_NUMBER 1 -#endif - -#ifdef CONFIG_ERRATA_SIFIVE -#define ERRATA_SIFIVE_CIP_453 0 -#define ERRATA_SIFIVE_CIP_1200 1 -#define ERRATA_SIFIVE_NUMBER 2 -#endif - -#ifdef CONFIG_ERRATA_THEAD -#define ERRATA_THEAD_MAE 0 -#define ERRATA_THEAD_PMU 1 -#define ERRATA_THEAD_GHOSTWRITE 2 -#define ERRATA_THEAD_NUMBER 3 -#endif - -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define ALT_INSN_FAULT(x) \ ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ @@ -42,7 +25,7 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ CONFIG_ERRATA_SIFIVE_CIP_453) -#else /* !__ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ #define ALT_SFENCE_VMA_ASID(asid) \ asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \ @@ -59,6 +42,17 @@ asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID, \ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ : : "r" (addr), "r" (asid) : "memory") +#define ALT_RISCV_PAUSE() \ +asm(ALTERNATIVE( \ + RISCV_PAUSE, /* Original RISC‑V pause insn */ \ + MIPS_PAUSE, /* Replacement for MIPS P8700 */ \ + MIPS_VENDOR_ID, /* 
Vendor ID to match */ \ + ERRATA_MIPS_P8700_PAUSE_OPCODE, /* patch_id */ \ + CONFIG_ERRATA_MIPS_P8700_PAUSE_OPCODE) \ + : /* no outputs */ \ + : /* no inputs */ \ + : "memory") + /* * _val is marked as "will be overwritten", so need to set it to 0 * in the default case. @@ -123,6 +117,6 @@ asm volatile(ALTERNATIVE( \ #define THEAD_C9XX_RV_IRQ_PMU 17 #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/riscv/include/asm/errata_list_vendors.h b/arch/riscv/include/asm/errata_list_vendors.h new file mode 100644 index 000000000000..ec7eba373437 --- /dev/null +++ b/arch/riscv/include/asm/errata_list_vendors.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef ASM_ERRATA_LIST_VENDORS_H +#define ASM_ERRATA_LIST_VENDORS_H + +#ifdef CONFIG_ERRATA_ANDES +#define ERRATA_ANDES_NO_IOCP 0 +#define ERRATA_ANDES_NUMBER 1 +#endif + +#ifdef CONFIG_ERRATA_SIFIVE +#define ERRATA_SIFIVE_CIP_453 0 +#define ERRATA_SIFIVE_CIP_1200 1 +#define ERRATA_SIFIVE_NUMBER 2 +#endif + +#ifdef CONFIG_ERRATA_THEAD +#define ERRATA_THEAD_MAE 0 +#define ERRATA_THEAD_PMU 1 +#define ERRATA_THEAD_GHOSTWRITE 2 +#define ERRATA_THEAD_NUMBER 3 +#endif + +#ifdef CONFIG_ERRATA_MIPS +#define ERRATA_MIPS_P8700_PAUSE_OPCODE 0 +#define ERRATA_MIPS_NUMBER 1 +#endif + +#endif /* ASM_ERRATA_LIST_VENDORS_H */ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 22ebea3c2b26..e5026cd8f022 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -13,7 +13,7 @@ #endif #define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void *return_address(unsigned int level); @@ -112,7 +112,7 @@ do { \ #define MCOUNT_JALR_SIZE 4 #define MCOUNT_NOP4_SIZE 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct dyn_ftrace; int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop @@ -235,7 +235,7 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsi #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h index efeb5edf8a3a..b499cf832734 100644 --- a/arch/riscv/include/asm/gpr-num.h +++ b/arch/riscv/include/asm/gpr-num.h @@ -2,7 +2,7 @@ #ifndef __ASM_GPR_NUM_H #define __ASM_GPR_NUM_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 .equ .L__gpr_num_x\num, \num @@ -41,7 +41,7 @@ .equ .L__gpr_num_t5, 30 .equ .L__gpr_num_t6, 31 -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ #define __DEFINE_ASM_GPR_NUMS \ " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ @@ -80,6 +80,6 @@ " .equ .L__gpr_num_t5, 30\n" \ " .equ .L__gpr_num_t6, 31\n" -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_GPR_NUM_H */ diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index 7fe0a379474a..948d2b34e94e 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include <uapi/asm/hwprobe.h> -#define RISCV_HWPROBE_MAX_KEY 13 +#define RISCV_HWPROBE_MAX_KEY 14 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { @@ -22,6 +22,7 @@ static inline bool hwprobe_key_is_bitmask(__s64 key) case RISCV_HWPROBE_KEY_IMA_EXT_0: case RISCV_HWPROBE_KEY_CPUPERF_0: 
case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0: + case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0: case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0: return true; } diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h index 8927a6ea1127..899254966e85 100644 --- a/arch/riscv/include/asm/image.h +++ b/arch/riscv/include/asm/image.h @@ -29,7 +29,7 @@ #define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \ RISCV_HEADER_VERSION_MINOR) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define riscv_image_flag_field(flags, field)\ (((flags) >> field##_SHIFT) & field##_MASK) /** @@ -63,5 +63,5 @@ struct riscv_image_header { u32 magic2; u32 res3; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_IMAGE_H */ diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index d5adbaec1d01..c9cfcea52cbb 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -25,7 +25,7 @@ #define INSN_S_SIMM5_SHIFT 7 #define INSN_S_OPCODE_SHIFT 0 -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #ifdef CONFIG_AS_HAS_INSN @@ -77,7 +77,7 @@ #define __INSN_I(...) insn_i __VA_ARGS__ #define __INSN_S(...) insn_s __VA_ARGS__ -#else /* ! __ASSEMBLY__ */ +#else /* ! __ASSEMBLER__ */ #ifdef CONFIG_AS_HAS_INSN @@ -153,7 +153,7 @@ #endif -#endif /* ! __ASSEMBLY__ */ +#endif /* ! __ASSEMBLER__ */ #define INSN_R(opcode, func3, func7, rd, rs1, rs2) \ __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \ @@ -263,7 +263,7 @@ #define RISCV_INSN_NOP4 _AC(0x00000013, U) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define nop() __asm__ __volatile__ ("nop") #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) __asm__ __volatile__ (__nops(n)) diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h index 09fde95a5e8f..c3005573e8c9 100644 --- a/arch/riscv/include/asm/insn.h +++ b/arch/riscv/include/asm/insn.h @@ -64,6 +64,7 @@ #define RVG_RS2_OPOFF 20 #define RVG_RD_OPOFF 7 #define RVG_RS1_MASK GENMASK(4, 0) +#define RVG_RS2_MASK GENMASK(4, 0) #define RVG_RD_MASK GENMASK(4, 0) /* The bit field of immediate value in RVC J instruction */ @@ -286,45 +287,216 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code) (code & RVC_INSN_J_RS1_MASK) != 0; } -#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) -#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) -#define RV_X(X, s, mask) (((X) >> (s)) & (mask)) -#define RVC_X(X, s, mask) RV_X(X, s, mask) +#define INSN_MATCH_LB 0x3 +#define INSN_MASK_LB 0x707f +#define INSN_MATCH_LH 0x1003 +#define INSN_MASK_LH 0x707f +#define INSN_MATCH_LW 0x2003 +#define INSN_MASK_LW 0x707f +#define INSN_MATCH_LD 0x3003 +#define INSN_MASK_LD 0x707f +#define INSN_MATCH_LBU 0x4003 +#define INSN_MASK_LBU 0x707f +#define INSN_MATCH_LHU 0x5003 +#define INSN_MASK_LHU 0x707f +#define INSN_MATCH_LWU 0x6003 +#define INSN_MASK_LWU 0x707f +#define INSN_MATCH_SB 0x23 +#define INSN_MASK_SB 0x707f +#define INSN_MATCH_SH 0x1023 +#define INSN_MASK_SH 0x707f +#define INSN_MATCH_SW 0x2023 +#define INSN_MASK_SW 0x707f +#define INSN_MATCH_SD 0x3023 +#define INSN_MASK_SD 0x707f + +#define INSN_MATCH_C_LD 0x6000 +#define INSN_MASK_C_LD 0xe003 +#define INSN_MATCH_C_SD 0xe000 +#define INSN_MASK_C_SD 0xe003 +#define INSN_MATCH_C_LW 0x4000 +#define INSN_MASK_C_LW 0xe003 +#define INSN_MATCH_C_SW 0xc000 +#define INSN_MASK_C_SW 0xe003 +#define INSN_MATCH_C_LDSP 0x6002 +#define INSN_MASK_C_LDSP 0xe003 +#define INSN_MATCH_C_SDSP 0xe002 +#define INSN_MASK_C_SDSP 0xe003 +#define INSN_MATCH_C_LWSP 0x4002 +#define INSN_MASK_C_LWSP 0xe003 +#define 
INSN_MATCH_C_SWSP 0xc002 +#define INSN_MASK_C_SWSP 0xe003 + +#define INSN_OPCODE_MASK 0x007c +#define INSN_OPCODE_SHIFT 2 +#define INSN_OPCODE_SYSTEM 28 + +#define INSN_MASK_WFI 0xffffffff +#define INSN_MATCH_WFI 0x10500073 + +#define INSN_MASK_WRS 0xffffffff +#define INSN_MATCH_WRS 0x00d00073 + +#define INSN_MATCH_CSRRW 0x1073 +#define INSN_MASK_CSRRW 0x707f +#define INSN_MATCH_CSRRS 0x2073 +#define INSN_MASK_CSRRS 0x707f +#define INSN_MATCH_CSRRC 0x3073 +#define INSN_MASK_CSRRC 0x707f +#define INSN_MATCH_CSRRWI 0x5073 +#define INSN_MASK_CSRRWI 0x707f +#define INSN_MATCH_CSRRSI 0x6073 +#define INSN_MASK_CSRRSI 0x707f +#define INSN_MATCH_CSRRCI 0x7073 +#define INSN_MASK_CSRRCI 0x707f + +#define INSN_MATCH_FLW 0x2007 +#define INSN_MASK_FLW 0x707f +#define INSN_MATCH_FLD 0x3007 +#define INSN_MASK_FLD 0x707f +#define INSN_MATCH_FLQ 0x4007 +#define INSN_MASK_FLQ 0x707f +#define INSN_MATCH_FSW 0x2027 +#define INSN_MASK_FSW 0x707f +#define INSN_MATCH_FSD 0x3027 +#define INSN_MASK_FSD 0x707f +#define INSN_MATCH_FSQ 0x4027 +#define INSN_MASK_FSQ 0x707f + +#define INSN_MATCH_C_FLD 0x2000 +#define INSN_MASK_C_FLD 0xe003 +#define INSN_MATCH_C_FLW 0x6000 +#define INSN_MASK_C_FLW 0xe003 +#define INSN_MATCH_C_FSD 0xa000 +#define INSN_MASK_C_FSD 0xe003 +#define INSN_MATCH_C_FSW 0xe000 +#define INSN_MASK_C_FSW 0xe003 +#define INSN_MATCH_C_FLDSP 0x2002 +#define INSN_MASK_C_FLDSP 0xe003 +#define INSN_MATCH_C_FSDSP 0xa002 +#define INSN_MASK_C_FSDSP 0xe003 +#define INSN_MATCH_C_FLWSP 0x6002 +#define INSN_MASK_C_FLWSP 0xe003 +#define INSN_MATCH_C_FSWSP 0xe002 +#define INSN_MASK_C_FSWSP 0xe003 + +#define INSN_MATCH_C_LHU 0x8400 +#define INSN_MASK_C_LHU 0xfc43 +#define INSN_MATCH_C_LH 0x8440 +#define INSN_MASK_C_LH 0xfc43 +#define INSN_MATCH_C_SH 0x8c00 +#define INSN_MASK_C_SH 0xfc43 + +#define INSN_16BIT_MASK 0x3 +#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK) +#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4) + +#define SHIFT_RIGHT(x, y) \ + ((y) < 0 ? 
((x) << -(y)) : ((x) >> (y))) + +#define REG_MASK \ + ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES)) + +#define REG_OFFSET(insn, pos) \ + (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK) + +#define REG_PTR(insn, pos, regs) \ + ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))) + +#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs)) +#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs)) +#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs)) +#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs)) +#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs)) +#define GET_SP(regs) (*REG_PTR(2, 0, regs)) +#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val)) +#define IMM_I(insn) ((s32)(insn) >> 20) +#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \ + (s32)(((insn) >> 7) & 0x1f)) + +#define SH_RD 7 +#define SH_RS1 15 +#define SH_RS2 20 +#define SH_RS2C 2 +#define MASK_RX 0x1f + +#if defined(CONFIG_64BIT) +#define LOG_REGBYTES 3 +#else +#define LOG_REGBYTES 2 +#endif + +#define MASK_FUNCT3 0x7000 + +#define GET_FUNCT3(insn) (((insn) >> 12) & 7) + +#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) +#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) +#define RV_X_MASK(X, s, mask) (((X) >> (s)) & (mask)) +#define RV_X(X, s, n) RV_X_MASK(X, s, ((1 << (n)) - 1)) +#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \ + (RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 5, 1) << 6)) +#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 5, 2) << 6)) +#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \ + (RV_X(x, 12, 1) << 5) | \ + (RV_X(x, 2, 2) << 6)) +#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \ + (RV_X(x, 12, 1) << 5) | \ + (RV_X(x, 2, 3) << 6)) +#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \ + (RV_X(x, 7, 2) << 6)) +#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \ + (RV_X(x, 7, 3) << 6)) +#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3)) +#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3)) +#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5) +#define RVC_X(X, s, mask) RV_X_MASK(X, s, mask) + +#define RV_EXTRACT_FUNCT3(x) \ + ({typeof(x) x_ = (x); \ + (RV_X_MASK(x_, RV_INSN_FUNCT3_OPOFF, \ + RV_INSN_FUNCT3_MASK >> RV_INSN_FUNCT3_OPOFF)); }) #define RV_EXTRACT_RS1_REG(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); }) + (RV_X_MASK(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); }) + +#define RV_EXTRACT_RS2_REG(x) \ + ({typeof(x) x_ = (x); \ + (RV_X_MASK(x_, RVG_RS2_OPOFF, RVG_RS2_MASK)); }) #define RV_EXTRACT_RD_REG(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); }) + (RV_X_MASK(x_, RVG_RD_OPOFF, RVG_RD_MASK)); }) #define RV_EXTRACT_UTYPE_IMM(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); }) + (RV_X_MASK(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); }) #define RV_EXTRACT_JTYPE_IMM(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \ - (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \ - (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \ + (RV_X_MASK(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \ + (RV_X_MASK(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \ + (RV_X_MASK(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \ (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); }) #define RV_EXTRACT_ITYPE_IMM(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \ + (RV_X_MASK(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) 
| \ (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); }) #define RV_EXTRACT_BTYPE_IMM(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \ - (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \ - (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \ + (RV_X_MASK(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \ + (RV_X_MASK(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \ + (RV_X_MASK(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \ (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); }) #define RVC_EXTRACT_C2_RS1_REG(x) \ ({typeof(x) x_ = (x); \ - (RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); }) + (RV_X_MASK(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); }) #define RVC_EXTRACT_JTYPE_IMM(x) \ ({typeof(x) x_ = (x); \ @@ -346,13 +518,13 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code) (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) #define RVG_EXTRACT_SYSTEM_CSR(x) \ - ({typeof(x) x_ = (x); RV_X(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); }) + ({typeof(x) x_ = (x); RV_X_MASK(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); }) #define RVFDQ_EXTRACT_FL_FS_WIDTH(x) \ - ({typeof(x) x_ = (x); RV_X(x_, RVFDQ_FL_FS_WIDTH_OFF, \ + ({typeof(x) x_ = (x); RV_X_MASK(x_, RVFDQ_FL_FS_WIDTH_OFF, \ RVFDQ_FL_FS_WIDTH_MASK); }) -#define RVV_EXRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x) +#define RVV_EXTRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x) /* * Get the immediate from a J-type instruction. @@ -375,10 +547,10 @@ static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm) { /* drop the old IMMs, all jal IMM bits sit at 31:12 */ *insn &= ~GENMASK(31, 12); - *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) | - (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) | - (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) | - (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF); + *insn |= (RV_X_MASK(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) | + (RV_X_MASK(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) | + (RV_X_MASK(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) | + (RV_X_MASK(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF); } /* diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index a0e51840b9db..09bb5f57a9d3 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -28,6 +28,10 @@ #ifdef CONFIG_MMU #define IO_SPACE_LIMIT (PCI_IO_SIZE - 1) #define PCI_IOBASE ((void __iomem *)PCI_IO_START) + +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL_NC)) + #endif /* CONFIG_MMU */ /* diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h index 87a71cc6d146..3ab5f2e3212b 100644 --- a/arch/riscv/include/asm/jump_label.h +++ b/arch/riscv/include/asm/jump_label.h @@ -7,7 +7,7 @@ #ifndef __ASM_JUMP_LABEL_H #define __ASM_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <asm/asm.h> @@ -66,5 +66,5 @@ label: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h index e6a0071bdb56..60af6691f903 100644 --- a/arch/riscv/include/asm/kasan.h +++ b/arch/riscv/include/asm/kasan.h @@ -4,7 +4,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifndef __ASSEMBLY__ +#ifndef 
__ASSEMBLER__ /* * The following comment was copied from arm64: diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index cc11c4544cff..7559d728c5ff 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -17,12 +17,12 @@ #define BREAK_INSTR_SIZE 4 #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ void arch_kgdb_breakpoint(void); extern unsigned long kgdb_compiled_break; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #define DBG_REG_ZERO "zero" #define DBG_REG_RA "ra" diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 1cc90465d75b..cf8e6eac77d5 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -7,7 +7,7 @@ #ifndef _ASM_RISCV_MMU_H #define _ASM_RISCV_MMU_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ typedef struct { #ifndef CONFIG_MMU @@ -40,6 +40,6 @@ typedef struct { void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, pgprot_t prot); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_MMU_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 572a141ddecd..ffe213ad65a4 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -41,7 +41,7 @@ #define PAGE_OFFSET ((unsigned long)phys_ram_base) #endif /* CONFIG_MMU */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_RISCV_ISA_ZICBOZ void clear_page(void *page); @@ -199,7 +199,7 @@ static __always_inline void *pfn_to_kaddr(unsigned long pfn) return __va(pfn << PAGE_SHIFT); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #define virt_addr_valid(vaddr) ({ \ unsigned long _addr = (unsigned long)vaddr; \ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 815067742939..29e994a9afb6 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -111,7 +111,7 @@ #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/page.h> #include <asm/tlbflush.h> @@ -203,6 +203,7 @@ extern struct pt_alloc_ops pt_ops __meminitdata; #define PAGE_TABLE __pgprot(_PAGE_TABLE) +#define _PAGE_KERNEL_NC ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE) #define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO) #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) @@ -1135,6 +1136,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ set_pgd(pgdp, pgd); \ }) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_RISCV_PGTABLE_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 24d3af4d3807..da5426122d28 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -54,7 +54,7 @@ #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; struct pt_regs; @@ -215,6 +215,6 @@ long get_tagged_addr_ctrl(struct task_struct *task); #define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index a7dc0e330757..addc8188152f 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h @@ -10,7 +10,7 @@ #include <asm/csr.h> #include <linux/compiler.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct pt_regs { unsigned long 
epc; @@ -180,6 +180,6 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) return !(regs->status & SR_PIE); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h index 0e45db78b24b..ab7714aa93bd 100644 --- a/arch/riscv/include/asm/scs.h +++ b/arch/riscv/include/asm/scs.h @@ -2,7 +2,7 @@ #ifndef _ASM_SCS_H #define _ASM_SCS_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #include <asm/asm-offsets.h> #ifdef CONFIG_SHADOW_CALL_STACK @@ -49,6 +49,6 @@ _skip_scs: .endm #endif /* CONFIG_SHADOW_CALL_STACK */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_SCS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index ea263d3683ef..87389e93325a 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -6,7 +6,7 @@ #ifndef _ASM_RISCV_SET_MEMORY_H #define _ASM_RISCV_SET_MEMORY_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * Functions to change memory attributes. */ @@ -45,7 +45,7 @@ int set_direct_map_default_noflush(struct page *page); int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); bool kernel_page_present(struct page *page); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) #ifdef CONFIG_64BIT diff --git a/arch/riscv/include/asm/swab.h b/arch/riscv/include/asm/swab.h new file mode 100644 index 000000000000..c1da22aa1326 --- /dev/null +++ b/arch/riscv/include/asm/swab.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_RISCV_SWAB_H +#define _ASM_RISCV_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/cpufeature-macros.h> +#include <asm/hwcap.h> +#include <asm-generic/swab.h> + +#if defined(CONFIG_TOOLCHAIN_HAS_ZBB) && defined(CONFIG_RISCV_ISA_ZBB) && !defined(NO_ALTERNATIVE) + +// Duplicated from include/uapi/linux/swab.h +#define ___constant_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define ___constant_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define ___constant_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define ARCH_SWAB(size, value) \ +({ \ + unsigned long x = value; \ + \ + if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) { \ + asm volatile (".option push\n" \ + ".option arch,+zbb\n" \ + "rev8 %0, %1\n" \ + ".option pop\n" \ + : "=r" (x) : "r" (x)); \ + x = x >> (BITS_PER_LONG - size); \ + } else { \ + x = ___constant_swab##size(value); \ + } \ + x; \ +}) + +static __always_inline __u16 __arch_swab16(__u16 value) +{ + return ARCH_SWAB(16, value); +} + +static __always_inline __u32 __arch_swab32(__u32 value) +{ + return ARCH_SWAB(32, value); +} + +#ifdef CONFIG_64BIT +static __always_inline __u64 __arch_swab64(__u64 value) +{ + return 
ARCH_SWAB(64, value); +} +#else +static __always_inline __u64 __arch_swab64(__u64 value) +{ + __u32 h = value >> 32; + __u32 l = value & ((1ULL << 32) - 1); + + return ((__u64)(__arch_swab32(l)) << 32) | ((__u64)(__arch_swab32(h))); +} +#endif + +#define __arch_swab64 __arch_swab64 +#define __arch_swab32 __arch_swab32 +#define __arch_swab16 __arch_swab16 + +#undef ___constant_swab16 +#undef ___constant_swab32 +#undef ___constant_swab64 + +#undef ARCH_SWAB + +#endif /* defined(CONFIG_TOOLCHAIN_HAS_ZBB) && defined(CONFIG_RISCV_ISA_ZBB) && !defined(NO_ALTERNATIVE) */ +#endif /* _ASM_RISCV_SWAB_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index f5916a70879a..c33d8b7dd488 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -37,7 +37,7 @@ #define IRQ_STACK_SIZE THREAD_SIZE -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> #include <asm/csr.h> @@ -98,7 +98,7 @@ struct thread_info { void arch_release_task_struct(struct task_struct *tsk); int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* * thread information flags diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index c130d8100232..f80357fe24d1 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -16,7 +16,7 @@ #define __VDSO_PAGES 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <generated/vdso-offsets.h> #define VDSO_SYMBOL(base, name) \ @@ -34,7 +34,7 @@ extern char compat_vdso_start[], compat_vdso_end[]; extern char vdso_start[], vdso_end[]; -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/vdso/getrandom.h b/arch/riscv/include/asm/vdso/getrandom.h index c6d66895c1f5..ab4aef955099 100644 --- a/arch/riscv/include/asm/vdso/getrandom.h +++ b/arch/riscv/include/asm/vdso/getrandom.h @@ -5,7 +5,7 @@ #ifndef __ASM_VDSO_GETRANDOM_H #define __ASM_VDSO_GETRANDOM_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/unistd.h> @@ -25,6 +25,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns return ret; } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h index 29164f84f93c..9ec08fa04d35 100644 --- a/arch/riscv/include/asm/vdso/gettimeofday.h +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -2,7 +2,7 @@ #ifndef __ASM_VDSO_GETTIMEOFDAY_H #define __ASM_VDSO_GETTIMEOFDAY_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/barrier.h> #include <asm/unistd.h> @@ -79,6 +79,6 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, return csr_read(CSR_TIME); } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h index 8f383f05a290..c42f95dc8811 100644 --- a/arch/riscv/include/asm/vdso/processor.h +++ b/arch/riscv/include/asm/vdso/processor.h @@ -2,9 +2,10 @@ #ifndef __ASM_VDSO_PROCESSOR_H #define __ASM_VDSO_PROCESSOR_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/barrier.h> +#include <asm/errata_list.h> #include <asm/insn-def.h> static inline void cpu_relax(void) @@ -19,10 +20,10 @@ static inline void cpu_relax(void) * Reduce instruction retirement. * This assumes the PC changes. 
*/ - __asm__ __volatile__ (RISCV_PAUSE); + ALT_RISCV_PAUSE(); barrier(); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h index 1140b54b4bc8..558eb9dfda52 100644 --- a/arch/riscv/include/asm/vdso/vsyscall.h +++ b/arch/riscv/include/asm/vdso/vsyscall.h @@ -2,13 +2,13 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <vdso/datapage.h> /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/riscv/include/asm/vendor_extensions/mips.h b/arch/riscv/include/asm/vendor_extensions/mips.h new file mode 100644 index 000000000000..ea8ca747d691 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/mips.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 MIPS. + */ + +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H +#define _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H + +#include <linux/types.h> + +#define RISCV_ISA_VENDOR_EXT_XMIPSEXECTL 0 + +#ifndef __ASSEMBLER__ +struct riscv_isa_vendor_ext_data_list; +extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_mips; +#endif + +/* Extension specific instructions */ + +/* + * All of the xmipsexectl extension instructions are + * ‘hint’ encodings of the SLLI instruction, + * with rd = 0, rs1 = 0 and imm = 1 for IHB, imm = 3 for EHB, + * and imm = 5 for PAUSE. + * MIPS.PAUSE is an alternative opcode which is implemented to have the + * same behavior as PAUSE on some MIPS RISCV cores. + * MIPS.EHB clears all execution hazards before allowing + * any subsequent instructions to execute. + * MIPS.IHB clears all instruction hazards before + * allowing any subsequent instructions to fetch. + */ + +#define MIPS_PAUSE ".4byte 0x00501013\n\t" +#define MIPS_EHB ".4byte 0x00301013\n\t" +#define MIPS_IHB ".4byte 0x00101013\n\t" + +#endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_H diff --git a/arch/riscv/include/asm/vendor_extensions/mips_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/mips_hwprobe.h new file mode 100644 index 000000000000..e63f664b6b17 --- /dev/null +++ b/arch/riscv/include/asm/vendor_extensions/mips_hwprobe.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 MIPS. 
+ */ + +#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_HWPROBE_H_ +#define _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_HWPROBE_H_ + +#include <linux/cpumask.h> +#include <uapi/asm/hwprobe.h> + +#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_MIPS +void hwprobe_isa_vendor_ext_mips_0(struct riscv_hwprobe *pair, const struct cpumask *cpus); +#else +static inline void hwprobe_isa_vendor_ext_mips_0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + pair->value = 0; +} +#endif + +#endif // _ASM_RISCV_VENDOR_EXTENSIONS_MIPS_HWPROBE_H_ diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h index a5150cdf34d8..3b09874d7a6d 100644 --- a/arch/riscv/include/asm/vendorid_list.h +++ b/arch/riscv/include/asm/vendorid_list.h @@ -9,5 +9,6 @@ #define MICROCHIP_VENDOR_ID 0x029 #define SIFIVE_VENDOR_ID 0x489 #define THEAD_VENDOR_ID 0x5b7 +#define MIPS_VENDOR_ID 0x722 #endif diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index aaf6ad970499..5d30a4fae37a 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -106,6 +106,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11 #define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12 #define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13 +#define RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 14 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ /* Flags */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index ef27d4289da1..251099d860aa 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -9,7 +9,7 @@ #ifndef __LINUX_KVM_RISCV_H #define __LINUX_KVM_RISCV_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <asm/bitsperlong.h> diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index a38268b19c3d..beff8df80ac9 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -6,7 +6,7 @@ #ifndef _UAPI_ASM_RISCV_PTRACE_H #define _UAPI_ASM_RISCV_PTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -127,6 +127,6 @@ struct __riscv_v_regset_state { */ #define RISCV_MAX_VLENB (8192) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI_ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h index cd4f175dc837..748dffc9ae19 100644 --- a/arch/riscv/include/uapi/asm/sigcontext.h +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -15,7 +15,7 @@ /* The size of END signal context header. 
*/ #define END_HDR_SIZE 0x0 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct __sc_riscv_v_state { struct __riscv_v_ext_state v_state; @@ -35,6 +35,6 @@ struct sigcontext { }; }; -#endif /*!__ASSEMBLY__*/ +#endif /*!__ASSEMBLER__*/ #endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */ diff --git a/arch/riscv/include/uapi/asm/vendor/mips.h b/arch/riscv/include/uapi/asm/vendor/mips.h new file mode 100644 index 000000000000..e65ab268b265 --- /dev/null +++ b/arch/riscv/include/uapi/asm/vendor/mips.h @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#define RISCV_HWPROBE_VENDOR_EXT_XMIPSEXECTL BIT(0) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index c7b542573407..f60fce69b725 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -113,7 +113,7 @@ obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o -obj-$(CONFIG_CFI_CLANG) += cfi.o +obj-$(CONFIG_CFI) += cfi.o obj-$(CONFIG_EFI) += efi.o obj-$(CONFIG_COMPAT) += compat_syscall_table.o diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 3f6d5a6789e8..71698ee11621 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -14,6 +14,7 @@ */ #include <linux/acpi.h> +#include <linux/efi-bgrt.h> #include <linux/efi.h> #include <linux/io.h> #include <linux/memblock.h> @@ -160,6 +161,8 @@ done: early_init_dt_scan_chosen_stdout(); } else { acpi_parse_spcr(earlycon_acpi_spcr_enable, true); + if (IS_ENABLED(CONFIG_ACPI_BGRT)) + acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); } } diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c index 7eb3cb1215c6..7642704c7f18 100644 --- a/arch/riscv/kernel/alternative.c +++ b/arch/riscv/kernel/alternative.c @@ -47,6 +47,11 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info cpu_mfr_info->patch_func = andes_errata_patch_func; break; #endif +#ifdef CONFIG_ERRATA_MIPS + case MIPS_VENDOR_ID: + cpu_mfr_info->patch_func = mips_errata_patch_func; + break; +#endif #ifdef CONFIG_ERRATA_SIFIVE case SIFIVE_VENDOR_ID: cpu_mfr_info->patch_func = sifive_errata_patch_func; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 743d53415572..67b59699357d 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -474,10 +474,10 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), __RISCV_ISA_EXT_DATA(zalrsc, RISCV_ISA_EXT_ZALRSC), __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), - __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), + __RISCV_ISA_EXT_DATA_VALIDATE(zfa, RISCV_ISA_EXT_ZFA, riscv_ext_f_depends), __RISCV_ISA_EXT_DATA_VALIDATE(zfbfmin, RISCV_ISA_EXT_ZFBFMIN, riscv_ext_f_depends), - __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), - __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), + __RISCV_ISA_EXT_DATA_VALIDATE(zfh, RISCV_ISA_EXT_ZFH, riscv_ext_f_depends), + __RISCV_ISA_EXT_DATA_VALIDATE(zfhmin, RISCV_ISA_EXT_ZFHMIN, riscv_ext_f_depends), __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), __RISCV_ISA_EXT_DATA_VALIDATE(zcb, RISCV_ISA_EXT_ZCB, riscv_ext_zca_depends), __RISCV_ISA_EXT_DATA_VALIDATE(zcd, RISCV_ISA_EXT_ZCD, riscv_ext_zcd_validate), diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index d0ded2438533..d3d92a4becc7 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <asm/alternative-macros.h> #include <asm/asm.h> #include 
<asm/csr.h> #include <asm/scs.h> diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c index b9eb41b0a975..dd9d92a96517 100644 --- a/arch/riscv/kernel/machine_kexec_file.c +++ b/arch/riscv/kernel/machine_kexec_file.c @@ -15,6 +15,7 @@ #include <linux/memblock.h> #include <linux/vmalloc.h> #include <asm/setup.h> +#include <asm/insn.h> const struct kexec_file_ops * const kexec_file_loaders[] = { &elf_kexec_ops, @@ -109,7 +110,6 @@ static char *setup_kdump_cmdline(struct kimage *image, char *cmdline, } #endif -#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) #define RISCV_IMM_BITS 12 #define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS) #define RISCV_CONST_HIGH_PART(x) \ diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile index 7dd15be69c90..bc098edac898 100644 --- a/arch/riscv/kernel/pi/Makefile +++ b/arch/riscv/kernel/pi/Makefile @@ -39,4 +39,4 @@ $(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE $(call if_changed_rule,cc_o_c) obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o -extra-y := $(patsubst %.pi.o,%.o,$(obj-y)) +targets := $(patsubst %.pi.o,%.o,$(obj-y)) diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c index fbcdc9e4e143..389d086a0718 100644 --- a/arch/riscv/kernel/pi/cmdline_early.c +++ b/arch/riscv/kernel/pi/cmdline_early.c @@ -41,9 +41,9 @@ static char *get_early_cmdline(uintptr_t dtb_pa) static u64 match_noXlvl(char *cmdline) { if (strstr(cmdline, "no4lvl")) - return SATP_MODE_48; + return SATP_MODE_39; else if (strstr(cmdline, "no5lvl")) - return SATP_MODE_57; + return SATP_MODE_48; return 0; } diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c index 9bdee2fafe47..a12ff8090f19 100644 --- a/arch/riscv/kernel/pi/fdt_early.c +++ b/arch/riscv/kernel/pi/fdt_early.c @@ -3,6 +3,7 @@ #include <linux/init.h> #include <linux/libfdt.h> #include <linux/ctype.h> +#include <asm/csr.h> #include "pi.h" @@ -183,3 +184,42 @@ bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name) return ret; } + +/** + * set_satp_mode_from_fdt - determine SATP mode based on the MMU type in fdt + * + * @dtb_pa: physical address of the device tree blob + * + * Returns the SATP mode corresponding to the MMU type of the first enabled CPU, + * 0 otherwise + */ +u64 set_satp_mode_from_fdt(uintptr_t dtb_pa) +{ + const void *fdt = (const void *)dtb_pa; + const char *mmu_type; + int node, parent; + + parent = fdt_path_offset(fdt, "/cpus"); + if (parent < 0) + return 0; + + fdt_for_each_subnode(node, fdt, parent) { + if (!fdt_node_name_eq(fdt, node, "cpu")) + continue; + + if (!fdt_device_is_available(fdt, node)) + continue; + + mmu_type = fdt_getprop(fdt, node, "mmu-type", NULL); + if (!mmu_type) + break; + + if (!strcmp(mmu_type, "riscv,sv39")) + return SATP_MODE_39; + else if (!strcmp(mmu_type, "riscv,sv48")) + return SATP_MODE_48; + break; + } + + return 0; +} diff --git a/arch/riscv/kernel/pi/pi.h b/arch/riscv/kernel/pi/pi.h index 21141d84fea6..3fee2cfddf7c 100644 --- a/arch/riscv/kernel/pi/pi.h +++ b/arch/riscv/kernel/pi/pi.h @@ -14,6 +14,7 @@ u64 get_kaslr_seed(uintptr_t dtb_pa); u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa); bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); +u64 set_satp_mode_from_fdt(uintptr_t dtb_pa); bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name); diff --git a/arch/riscv/kernel/probes/simulate-insn.c 
b/arch/riscv/kernel/probes/simulate-insn.c index 6c166029079c..fa581590c1f8 100644 --- a/arch/riscv/kernel/probes/simulate-insn.c +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -41,19 +41,16 @@ bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs * 1 10 1 8 5 JAL/J */ bool ret; - u32 imm; - u32 index = (opcode >> 7) & 0x1f; + s32 imm; + u32 index = RV_EXTRACT_RD_REG(opcode); ret = rv_insn_reg_set_val(regs, index, addr + 4); if (!ret) return ret; - imm = ((opcode >> 21) & 0x3ff) << 1; - imm |= ((opcode >> 20) & 0x1) << 11; - imm |= ((opcode >> 12) & 0xff) << 12; - imm |= ((opcode >> 31) & 0x1) << 20; + imm = RV_EXTRACT_JTYPE_IMM(opcode); - instruction_pointer_set(regs, addr + sign_extend32((imm), 20)); + instruction_pointer_set(regs, addr + imm); return ret; } @@ -67,9 +64,9 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg */ bool ret; unsigned long base_addr; - u32 imm = (opcode >> 20) & 0xfff; - u32 rd_index = (opcode >> 7) & 0x1f; - u32 rs1_index = (opcode >> 15) & 0x1f; + u32 imm = RV_EXTRACT_ITYPE_IMM(opcode); + u32 rd_index = RV_EXTRACT_RD_REG(opcode); + u32 rs1_index = RV_EXTRACT_RS1_REG(opcode); ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); if (!ret) @@ -84,20 +81,6 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg return ret; } -#define auipc_rd_idx(opcode) \ - ((opcode >> 7) & 0x1f) - -#define auipc_imm(opcode) \ - ((((opcode) >> 12) & 0xfffff) << 12) - -#if __riscv_xlen == 64 -#define auipc_offset(opcode) sign_extend64(auipc_imm(opcode), 31) -#elif __riscv_xlen == 32 -#define auipc_offset(opcode) auipc_imm(opcode) -#else -#error "Unexpected __riscv_xlen" -#endif - bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* @@ -107,8 +90,8 @@ bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *re * 20 5 7 */ - u32 rd_idx = auipc_rd_idx(opcode); - unsigned long rd_val = addr + auipc_offset(opcode); + u32 rd_idx = RV_EXTRACT_RD_REG(opcode); + unsigned long rd_val = addr + (s32)RV_EXTRACT_UTYPE_IMM(opcode); if (!rv_insn_reg_set_val(regs, rd_idx, rd_val)) return false; @@ -118,24 +101,6 @@ bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *re return true; } -#define branch_rs1_idx(opcode) \ - (((opcode) >> 15) & 0x1f) - -#define branch_rs2_idx(opcode) \ - (((opcode) >> 20) & 0x1f) - -#define branch_funct3(opcode) \ - (((opcode) >> 12) & 0x7) - -#define branch_imm(opcode) \ - (((((opcode) >> 8) & 0xf ) << 1) | \ - ((((opcode) >> 25) & 0x3f) << 5) | \ - ((((opcode) >> 7) & 0x1 ) << 11) | \ - ((((opcode) >> 31) & 0x1 ) << 12)) - -#define branch_offset(opcode) \ - sign_extend32((branch_imm(opcode)), 12) - bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* @@ -156,12 +121,12 @@ bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *r unsigned long rs1_val; unsigned long rs2_val; - if (!rv_insn_reg_get_val(regs, branch_rs1_idx(opcode), &rs1_val) || - !rv_insn_reg_get_val(regs, branch_rs2_idx(opcode), &rs2_val)) + if (!rv_insn_reg_get_val(regs, RV_EXTRACT_RS1_REG(opcode), &rs1_val) || + !rv_insn_reg_get_val(regs, RV_EXTRACT_RS2_REG(opcode), &rs2_val)) return false; - offset_tmp = branch_offset(opcode); - switch (branch_funct3(opcode)) { + offset_tmp = RV_EXTRACT_BTYPE_IMM(opcode); + switch (RV_EXTRACT_FUNCT3(opcode)) { case RVG_FUNCT3_BEQ: offset = (rs1_val == rs2_val) ? 
offset_tmp : 4; break; @@ -191,24 +156,9 @@ bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *r bool __kprobes simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs) { - /* - * 15 13 12 2 1 0 - * | funct3 | offset[11|4|9:8|10|6|7|3:1|5] | opcode | - * 3 11 2 - */ - - s32 offset; + s32 offset = RVC_EXTRACT_JTYPE_IMM(opcode); - offset = ((opcode >> 3) & 0x7) << 1; - offset |= ((opcode >> 11) & 0x1) << 4; - offset |= ((opcode >> 2) & 0x1) << 5; - offset |= ((opcode >> 7) & 0x1) << 6; - offset |= ((opcode >> 6) & 0x1) << 7; - offset |= ((opcode >> 9) & 0x3) << 8; - offset |= ((opcode >> 8) & 0x1) << 10; - offset |= ((opcode >> 12) & 0x1) << 11; - - instruction_pointer_set(regs, addr + sign_extend32(offset, 11)); + instruction_pointer_set(regs, addr + offset); return true; } @@ -224,7 +174,7 @@ static bool __kprobes simulate_c_jr_jalr(u32 opcode, unsigned long addr, struct unsigned long jump_addr; - u32 rs1 = (opcode >> 7) & 0x1f; + u32 rs1 = RVC_EXTRACT_C2_RS1_REG(opcode); if (rs1 == 0) /* C.JR is only valid when rs1 != x0 */ return false; @@ -268,16 +218,10 @@ static bool __kprobes simulate_c_bnez_beqz(u32 opcode, unsigned long addr, struc if (!rv_insn_reg_get_val(regs, rs1, &rs1_val)) return false; - if ((rs1_val != 0 && is_bnez) || (rs1_val == 0 && !is_bnez)) { - offset = ((opcode >> 3) & 0x3) << 1; - offset |= ((opcode >> 10) & 0x3) << 3; - offset |= ((opcode >> 2) & 0x1) << 5; - offset |= ((opcode >> 5) & 0x3) << 6; - offset |= ((opcode >> 12) & 0x1) << 8; - offset = sign_extend32(offset, 8); - } else { + if ((rs1_val != 0 && is_bnez) || (rs1_val == 0 && !is_bnez)) + offset = RVC_EXTRACT_BTYPE_IMM(opcode); + else offset = 2; - } instruction_pointer_set(regs, addr + offset); diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 53836a9235e3..5e8cde055264 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -148,7 +148,7 @@ static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask, static void sbi_set_power_off(void) { - pm_power_off = sbi_shutdown; + register_platform_power_off(sbi_shutdown); } #else static void __sbi_set_timer_v01(uint64_t stime_value) @@ -682,7 +682,7 @@ void __init sbi_init(void) if (sbi_spec_version >= sbi_mk_version(0, 3) && sbi_probe_extension(SBI_EXT_SRST)) { pr_info("SBI SRST extension detected\n"); - pm_power_off = sbi_srst_power_off; + register_platform_power_off(sbi_srst_power_off); sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot; sbi_srst_reboot_nb.priority = 192; register_restart_handler(&sbi_srst_reboot_nb); diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index 0b170e18a2be..000f4451a9d8 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -15,6 +15,7 @@ #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/vector.h> +#include <asm/vendor_extensions/mips_hwprobe.h> #include <asm/vendor_extensions/sifive_hwprobe.h> #include <asm/vendor_extensions/thead_hwprobe.h> #include <vdso/vsyscall.h> @@ -153,14 +154,12 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, EXT_KEY(ZVKT); } - if (has_fpu()) { - EXT_KEY(ZCD); - EXT_KEY(ZCF); - EXT_KEY(ZFA); - EXT_KEY(ZFBFMIN); - EXT_KEY(ZFH); - EXT_KEY(ZFHMIN); - } + EXT_KEY(ZCD); + EXT_KEY(ZCF); + EXT_KEY(ZFA); + EXT_KEY(ZFBFMIN); + EXT_KEY(ZFH); + EXT_KEY(ZFHMIN); if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) EXT_KEY(SUPM); @@ -309,6 +308,9 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair, case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0: 
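
The simulate-insn.c conversion above replaces open-coded bit fiddling with RV_EXTRACT_* helpers from <asm/insn.h>. That header is not part of this hunk, but the removed lines pin down what, for example, the J-type immediate helper has to compute; an equivalent open-coded sketch (only the RV_EXTRACT_JTYPE_IMM name and the bit positions come from this diff, the function name here is illustrative):

	/* J-type: imm[20|10:1|11|19:12] scattered over bits 31..12, always even */
	static inline s32 jtype_imm_equiv(u32 opcode)
	{
		u32 imm = ((opcode >> 21) & 0x3ff) << 1;	/* imm[10:1]  */

		imm |= ((opcode >> 20) & 0x1) << 11;		/* imm[11]    */
		imm |= ((opcode >> 12) & 0xff) << 12;		/* imm[19:12] */
		imm |= ((opcode >> 31) & 0x1) << 20;		/* imm[20]    */

		return sign_extend32(imm, 20);			/* +/- 1 MiB reach */
	}

Folding the sign extension into the helper is what lets simulate_jal() simply do instruction_pointer_set(regs, addr + imm) with a signed imm.
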
hwprobe_isa_vendor_ext_thead_0(pair, cpus); break; + case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0: + hwprobe_isa_vendor_ext_mips_0(pair, cpus); + break; /* * For forward compatibility, unknown keys don't fail the whole diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index d77afe05578f..795b2e815ac9 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -10,7 +10,7 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long fd, off_t offset, + unsigned long fd, unsigned long offset, unsigned long page_shift_offset) { if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug index 78cea5d2c270..5db4df44279e 100644 --- a/arch/riscv/kernel/tests/Kconfig.debug +++ b/arch/riscv/kernel/tests/Kconfig.debug @@ -30,6 +30,18 @@ config RISCV_MODULE_LINKING_KUNIT If unsure, say N. +config RISCV_KPROBES_KUNIT + bool "KUnit test for riscv kprobes" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on KPROBES + default KUNIT_ALL_TESTS + help + Enable testing for riscv kprobes. Useful for riscv and/or kprobes + development. The test verifies that kprobes do not change the behaviour + of some sample functions. + + If unsure, say N. + endif # RUNTIME_TESTING_MENU endmenu # "arch/riscv/kernel runtime Testing" diff --git a/arch/riscv/kernel/tests/Makefile b/arch/riscv/kernel/tests/Makefile index 7d6c76cffe20..407e7e6c28dc 100644 --- a/arch/riscv/kernel/tests/Makefile +++ b/arch/riscv/kernel/tests/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_RISCV_MODULE_LINKING_KUNIT) += module_test/ +obj-$(CONFIG_RISCV_KPROBES_KUNIT) += kprobes/ diff --git a/arch/riscv/kernel/tests/kprobes/Makefile b/arch/riscv/kernel/tests/kprobes/Makefile new file mode 100644 index 000000000000..4cb6c66a98e8 --- /dev/null +++ b/arch/riscv/kernel/tests/kprobes/Makefile @@ -0,0 +1 @@ +obj-y += test-kprobes.o test-kprobes-asm.o diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes-asm.S b/arch/riscv/kernel/tests/kprobes/test-kprobes-asm.S new file mode 100644 index 000000000000..b951d0f12482 --- /dev/null +++ b/arch/riscv/kernel/tests/kprobes/test-kprobes-asm.S @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include "test-kprobes.h" + +SYM_FUNC_START(test_kprobes_add) + li a1, KPROBE_TEST_MAGIC_UPPER + li a2, KPROBE_TEST_MAGIC_LOWER +test_kprobes_add_addr1: + add a1, a1, a2 +test_kprobes_add_addr2: + add a0, a1, x0 + ret +SYM_FUNC_END(test_kprobes_add) + +SYM_FUNC_START(test_kprobes_jal) + li a0, 0 + mv a1, ra + .option push + .option norvc +test_kprobes_jal_addr1: + jal x0, 2f + ret + .option pop +1: li a0, KPROBE_TEST_MAGIC_UPPER + ret + .option push + .option norvc +test_kprobes_jal_addr2: +2: jal 1b + .option pop + li a2, KPROBE_TEST_MAGIC_LOWER + add a0, a0, a2 + jr a1 +SYM_FUNC_END(test_kprobes_jal) + +SYM_FUNC_START(test_kprobes_jalr) + la a0, 1f + mv a1, ra + .option push + .option norvc +test_kprobes_jalr_addr: + jalr a0 + .option pop + li t0, KPROBE_TEST_MAGIC_UPPER + add a0, a0, t0 + jr a1 +1: li a0, KPROBE_TEST_MAGIC_LOWER + ret +SYM_FUNC_END(test_kprobes_jalr) + +SYM_FUNC_START(test_kprobes_auipc) +test_kprobes_auipc_addr: + auipc a0, KPROBE_TEST_MAGIC_LOWER + la a1, test_kprobes_auipc_addr + sub a0, a0, a1 + srli a0, a0, 12 + li a1, KPROBE_TEST_MAGIC_UPPER + add a0, a0, a1 + ret +SYM_FUNC_END(test_kprobes_auipc) + +SYM_FUNC_START(test_kprobes_branch) + .option push + 
.option norvc + li a0, 0 + li a1, 1 + li a2, 2 +test_kprobes_branch_addr1: + beqz a0, 1f + ret +1: +test_kprobes_branch_addr2: + beqz a1, 3f +test_kprobes_branch_addr3: + bnez a0, 3f +test_kprobes_branch_addr4: + bnez a2, 1f + ret +1: +test_kprobes_branch_addr5: + bge a1, a2, 3f +test_kprobes_branch_addr6: + bge a2, a1, 2f + ret +1: + li t0, KPROBE_TEST_MAGIC_UPPER + add a0, a0, t0 + ret +2: +test_kprobes_branch_addr7: + blt a2, a1, 3f + li a0, KPROBE_TEST_MAGIC_LOWER +test_kprobes_branch_addr8: + blt a1, a2, 1b +3: + li a0, 0 + ret + .option pop +SYM_FUNC_END(test_kprobes_branch) + +#ifdef CONFIG_RISCV_ISA_C + +SYM_FUNC_START(test_kprobes_c_j) + li a0, 0 +test_kprobes_branch_c_j_addr1: + c.j 2f +1: + li a1, KPROBE_TEST_MAGIC_UPPER + add a0, a0, a1 + ret +2: li a0, KPROBE_TEST_MAGIC_LOWER +test_kprobes_branch_c_j_addr2: + c.j 1b +SYM_FUNC_END(test_kprobes_c_j) + +SYM_FUNC_START(test_kprobes_c_jr) + la a0, 2f +test_kprobes_c_jr_addr1: + c.jr a0 + ret +1: li a1, KPROBE_TEST_MAGIC_LOWER + add a0, a0, a1 + ret +2: + li a0, KPROBE_TEST_MAGIC_UPPER + la a1, 1b +test_kprobes_c_jr_addr2: + c.jr a1 +SYM_FUNC_END(test_kprobes_c_jr) + +SYM_FUNC_START(test_kprobes_c_jalr) + mv a1, ra + la a0, 1f +test_kprobes_c_jalr_addr: + c.jalr a0 + li a2, KPROBE_TEST_MAGIC_UPPER + add a0, a0, a2 + jr a1 +1: li a0, KPROBE_TEST_MAGIC_LOWER + ret +SYM_FUNC_END(test_kprobes_c_jalr) + +SYM_FUNC_START(test_kprobes_c_beqz) + li a0, 0 + li a1, 1 +test_kprobes_c_beqz_addr1: + c.beqz a0, 2f + ret +1: li a1, KPROBE_TEST_MAGIC_UPPER + add a0, a0, a1 + ret +test_kprobes_c_beqz_addr2: +2: c.beqz a1, 3f + li a0, KPROBE_TEST_MAGIC_LOWER + mv a1, x0 +test_kprobes_c_beqz_addr3: + c.beqz a1, 1b +3: li a0, 0 + ret +SYM_FUNC_END(test_kprobes_c_beqz) + +SYM_FUNC_START(test_kprobes_c_bnez) + li a0, 0 + li a1, 1 +test_kprobes_c_bnez_addr1: + c.bnez a1, 2f + ret +1: li a1, KPROBE_TEST_MAGIC_UPPER + add a0, a0, a1 + ret +test_kprobes_c_bnez_addr2: +2: c.bnez a0, 3f + li a0, KPROBE_TEST_MAGIC_LOWER +test_kprobes_c_bnez_addr3: + c.bnez a0, 1b +3: li a0, 0 + ret +SYM_FUNC_END(test_kprobes_c_bnez) + +#endif /* CONFIG_RISCV_ISA_C */ + +SYM_DATA_START(test_kprobes_addresses) + RISCV_PTR test_kprobes_add_addr1 + RISCV_PTR test_kprobes_add_addr2 + RISCV_PTR test_kprobes_jal_addr1 + RISCV_PTR test_kprobes_jal_addr2 + RISCV_PTR test_kprobes_jalr_addr + RISCV_PTR test_kprobes_auipc_addr + RISCV_PTR test_kprobes_branch_addr1 + RISCV_PTR test_kprobes_branch_addr2 + RISCV_PTR test_kprobes_branch_addr3 + RISCV_PTR test_kprobes_branch_addr4 + RISCV_PTR test_kprobes_branch_addr5 + RISCV_PTR test_kprobes_branch_addr6 + RISCV_PTR test_kprobes_branch_addr7 + RISCV_PTR test_kprobes_branch_addr8 +#ifdef CONFIG_RISCV_ISA_C + RISCV_PTR test_kprobes_branch_c_j_addr1 + RISCV_PTR test_kprobes_branch_c_j_addr2 + RISCV_PTR test_kprobes_c_jr_addr1 + RISCV_PTR test_kprobes_c_jr_addr2 + RISCV_PTR test_kprobes_c_jalr_addr + RISCV_PTR test_kprobes_c_beqz_addr1 + RISCV_PTR test_kprobes_c_beqz_addr2 + RISCV_PTR test_kprobes_c_beqz_addr3 + RISCV_PTR test_kprobes_c_bnez_addr1 + RISCV_PTR test_kprobes_c_bnez_addr2 + RISCV_PTR test_kprobes_c_bnez_addr3 +#endif /* CONFIG_RISCV_ISA_C */ + RISCV_PTR 0 +SYM_DATA_END(test_kprobes_addresses) + +SYM_DATA_START(test_kprobes_functions) + RISCV_PTR test_kprobes_add + RISCV_PTR test_kprobes_jal + RISCV_PTR test_kprobes_jalr + RISCV_PTR test_kprobes_auipc + RISCV_PTR test_kprobes_branch +#ifdef CONFIG_RISCV_ISA_C + RISCV_PTR test_kprobes_c_j + RISCV_PTR test_kprobes_c_jr + RISCV_PTR test_kprobes_c_jalr + RISCV_PTR test_kprobes_c_beqz + 
RISCV_PTR test_kprobes_c_bnez +#endif /* CONFIG_RISCV_ISA_C */ + RISCV_PTR 0 +SYM_DATA_END(test_kprobes_functions) diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes.c b/arch/riscv/kernel/tests/kprobes/test-kprobes.c new file mode 100644 index 000000000000..6f6cdfbf5a95 --- /dev/null +++ b/arch/riscv/kernel/tests/kprobes/test-kprobes.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kernel.h> +#include <linux/kprobes.h> +#include <kunit/test.h> +#include "test-kprobes.h" + +static int kprobe_dummy_handler(struct kprobe *kp, struct pt_regs *regs) +{ + return 0; +} + +static void test_kprobe_riscv(struct kunit *test) +{ + unsigned int num_kprobe = 0; + long (*func)(void); + struct kprobe *kp; + int i; + + while (test_kprobes_addresses[num_kprobe]) + num_kprobe++; + + kp = kcalloc(num_kprobe, sizeof(*kp), GFP_KERNEL); + KUNIT_EXPECT_TRUE(test, kp); + if (!kp) + return; + + for (i = 0; i < num_kprobe; ++i) { + kp[i].addr = test_kprobes_addresses[i]; + kp[i].pre_handler = kprobe_dummy_handler; + KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp[i])); + } + + for (i = 0;; ++i) { + func = test_kprobes_functions[i]; + if (!func) + break; + KUNIT_EXPECT_EQ_MSG(test, KPROBE_TEST_MAGIC, func(), "function %d broken", i); + } + + for (i = 0; i < num_kprobe; ++i) + unregister_kprobe(&kp[i]); + kfree(kp); +} + +static struct kunit_case kprobes_testcases[] = { + KUNIT_CASE(test_kprobe_riscv), + {} +}; + +static struct kunit_suite kprobes_test_suite = { + .name = "kprobes_test_riscv", + .test_cases = kprobes_testcases, +}; + +kunit_test_suites(&kprobes_test_suite); diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes.h b/arch/riscv/kernel/tests/kprobes/test-kprobes.h new file mode 100644 index 000000000000..3886ab491ecb --- /dev/null +++ b/arch/riscv/kernel/tests/kprobes/test-kprobes.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef TEST_KPROBES_H +#define TEST_KPROBES_H + +/* + * The magic value that all the functions in the test_kprobes_functions array return. The test + * installs kprobes into these functions, and verify that the functions still correctly return this + * value. 
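
Each assembly test function above builds that return value from the two halves defined below and adds them, so a probe that corrupts a register or mis-simulates an instruction shows up as a wrong sum in the KUNIT_EXPECT_EQ_MSG() comparison:

	  KPROBE_TEST_MAGIC_UPPER   0xcafe0000
	+ KPROBE_TEST_MAGIC_LOWER   0x0000babe
	--------------------------------------
	= KPROBE_TEST_MAGIC         0xcafebabe
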
+ */ +#define KPROBE_TEST_MAGIC 0xcafebabe +#define KPROBE_TEST_MAGIC_LOWER 0x0000babe +#define KPROBE_TEST_MAGIC_UPPER 0xcafe0000 + +#ifndef __ASSEMBLY__ + +/* array of addresses to install kprobes */ +extern void *test_kprobes_addresses[]; + +/* array of functions that return KPROBE_TEST_MAGIC */ +extern long (*test_kprobes_functions[])(void); + +#endif /* __ASSEMBLY__ */ + +#endif /* TEST_KPROBES_H */ diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index f760e4fcc052..2a27d3ff4ac6 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -18,149 +18,7 @@ #include <asm/cpufeature.h> #include <asm/sbi.h> #include <asm/vector.h> - -#define INSN_MATCH_LB 0x3 -#define INSN_MASK_LB 0x707f -#define INSN_MATCH_LH 0x1003 -#define INSN_MASK_LH 0x707f -#define INSN_MATCH_LW 0x2003 -#define INSN_MASK_LW 0x707f -#define INSN_MATCH_LD 0x3003 -#define INSN_MASK_LD 0x707f -#define INSN_MATCH_LBU 0x4003 -#define INSN_MASK_LBU 0x707f -#define INSN_MATCH_LHU 0x5003 -#define INSN_MASK_LHU 0x707f -#define INSN_MATCH_LWU 0x6003 -#define INSN_MASK_LWU 0x707f -#define INSN_MATCH_SB 0x23 -#define INSN_MASK_SB 0x707f -#define INSN_MATCH_SH 0x1023 -#define INSN_MASK_SH 0x707f -#define INSN_MATCH_SW 0x2023 -#define INSN_MASK_SW 0x707f -#define INSN_MATCH_SD 0x3023 -#define INSN_MASK_SD 0x707f - -#define INSN_MATCH_FLW 0x2007 -#define INSN_MASK_FLW 0x707f -#define INSN_MATCH_FLD 0x3007 -#define INSN_MASK_FLD 0x707f -#define INSN_MATCH_FLQ 0x4007 -#define INSN_MASK_FLQ 0x707f -#define INSN_MATCH_FSW 0x2027 -#define INSN_MASK_FSW 0x707f -#define INSN_MATCH_FSD 0x3027 -#define INSN_MASK_FSD 0x707f -#define INSN_MATCH_FSQ 0x4027 -#define INSN_MASK_FSQ 0x707f - -#define INSN_MATCH_C_LD 0x6000 -#define INSN_MASK_C_LD 0xe003 -#define INSN_MATCH_C_SD 0xe000 -#define INSN_MASK_C_SD 0xe003 -#define INSN_MATCH_C_LW 0x4000 -#define INSN_MASK_C_LW 0xe003 -#define INSN_MATCH_C_SW 0xc000 -#define INSN_MASK_C_SW 0xe003 -#define INSN_MATCH_C_LDSP 0x6002 -#define INSN_MASK_C_LDSP 0xe003 -#define INSN_MATCH_C_SDSP 0xe002 -#define INSN_MASK_C_SDSP 0xe003 -#define INSN_MATCH_C_LWSP 0x4002 -#define INSN_MASK_C_LWSP 0xe003 -#define INSN_MATCH_C_SWSP 0xc002 -#define INSN_MASK_C_SWSP 0xe003 - -#define INSN_MATCH_C_FLD 0x2000 -#define INSN_MASK_C_FLD 0xe003 -#define INSN_MATCH_C_FLW 0x6000 -#define INSN_MASK_C_FLW 0xe003 -#define INSN_MATCH_C_FSD 0xa000 -#define INSN_MASK_C_FSD 0xe003 -#define INSN_MATCH_C_FSW 0xe000 -#define INSN_MASK_C_FSW 0xe003 -#define INSN_MATCH_C_FLDSP 0x2002 -#define INSN_MASK_C_FLDSP 0xe003 -#define INSN_MATCH_C_FSDSP 0xa002 -#define INSN_MASK_C_FSDSP 0xe003 -#define INSN_MATCH_C_FLWSP 0x6002 -#define INSN_MASK_C_FLWSP 0xe003 -#define INSN_MATCH_C_FSWSP 0xe002 -#define INSN_MASK_C_FSWSP 0xe003 - -#define INSN_MATCH_C_LHU 0x8400 -#define INSN_MASK_C_LHU 0xfc43 -#define INSN_MATCH_C_LH 0x8440 -#define INSN_MASK_C_LH 0xfc43 -#define INSN_MATCH_C_SH 0x8c00 -#define INSN_MASK_C_SH 0xfc43 - -#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 
2 : 4) - -#if defined(CONFIG_64BIT) -#define LOG_REGBYTES 3 -#define XLEN 64 -#else -#define LOG_REGBYTES 2 -#define XLEN 32 -#endif -#define REGBYTES (1 << LOG_REGBYTES) -#define XLEN_MINUS_16 ((XLEN) - 16) - -#define SH_RD 7 -#define SH_RS1 15 -#define SH_RS2 20 -#define SH_RS2C 2 - -#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) -#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \ - (RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 5, 1) << 6)) -#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 5, 2) << 6)) -#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \ - (RV_X(x, 12, 1) << 5) | \ - (RV_X(x, 2, 2) << 6)) -#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \ - (RV_X(x, 12, 1) << 5) | \ - (RV_X(x, 2, 3) << 6)) -#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \ - (RV_X(x, 7, 2) << 6)) -#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 7, 3) << 6)) -#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3)) -#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3)) -#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5) - -#define SHIFT_RIGHT(x, y) \ - ((y) < 0 ? ((x) << -(y)) : ((x) >> (y))) - -#define REG_MASK \ - ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES)) - -#define REG_OFFSET(insn, pos) \ - (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK) - -#define REG_PTR(insn, pos, regs) \ - (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)) - -#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs)) -#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs)) -#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs)) -#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs)) -#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs)) -#define GET_SP(regs) (*REG_PTR(2, 0, regs)) -#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val)) -#define IMM_I(insn) ((s32)(insn) >> 20) -#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \ - (s32)(((insn) >> 7) & 0x1f)) -#define MASK_FUNCT3 0x7000 - -#define GET_PRECISION(insn) (((insn) >> 25) & 3) -#define GET_RM(insn) (((insn) >> 12) & 7) -#define PRECISION_S 0 -#define PRECISION_D 1 +#include <asm/insn.h> #ifdef CONFIG_FPU diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 184f780c932d..901e67adf576 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -93,7 +93,7 @@ bool insn_is_vector(u32 insn_buf) return true; case RVV_OPCODE_VL: case RVV_OPCODE_VS: - width = RVV_EXRACT_VL_VS_WIDTH(insn_buf); + width = RVV_EXTRACT_VL_VS_WIDTH(insn_buf); if (width == RVV_VL_VS_WIDTH_8 || width == RVV_VL_VS_WIDTH_16 || width == RVV_VL_VS_WIDTH_32 || width == RVV_VL_VS_WIDTH_64) return true; diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c index 92d8ff81f42c..bb4a75923685 100644 --- a/arch/riscv/kernel/vendor_extensions.c +++ b/arch/riscv/kernel/vendor_extensions.c @@ -6,6 +6,7 @@ #include <asm/vendorid_list.h> #include <asm/vendor_extensions.h> #include <asm/vendor_extensions/andes.h> +#include <asm/vendor_extensions/mips.h> #include <asm/vendor_extensions/sifive.h> #include <asm/vendor_extensions/thead.h> @@ -16,6 +17,9 @@ struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = { #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES &riscv_isa_vendor_ext_list_andes, #endif +#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_MIPS + &riscv_isa_vendor_ext_list_mips, +#endif #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_SIFIVE &riscv_isa_vendor_ext_list_sifive, #endif @@ -49,6 +53,12 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig cpu_bmap = 
riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap; break; #endif + #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_MIPS + case MIPS_VENDOR_ID: + bmap = &riscv_isa_vendor_ext_list_mips.all_harts_isa_bitmap; + cpu_bmap = riscv_isa_vendor_ext_list_mips.per_hart_isa_bitmap; + break; + #endif #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_SIFIVE case SIFIVE_VENDOR_ID: bmap = &riscv_isa_vendor_ext_list_sifive.all_harts_isa_bitmap; diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile index a4eca96d1c8a..bf116c82b6bd 100644 --- a/arch/riscv/kernel/vendor_extensions/Makefile +++ b/arch/riscv/kernel/vendor_extensions/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o +obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_MIPS) += mips.o +obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_MIPS) += mips_hwprobe.o obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_SIFIVE) += sifive.o obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_SIFIVE) += sifive_hwprobe.o obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_THEAD) += thead.o diff --git a/arch/riscv/kernel/vendor_extensions/mips.c b/arch/riscv/kernel/vendor_extensions/mips.c new file mode 100644 index 000000000000..f691129f96c2 --- /dev/null +++ b/arch/riscv/kernel/vendor_extensions/mips.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 MIPS. + */ + +#include <asm/cpufeature.h> +#include <asm/vendor_extensions.h> +#include <asm/vendor_extensions/mips.h> + +#include <linux/array_size.h> +#include <linux/cpumask.h> +#include <linux/types.h> + +/* All MIPS vendor extensions supported in Linux */ +static const struct riscv_isa_ext_data riscv_isa_vendor_ext_mips[] = { + __RISCV_ISA_EXT_DATA(xmipsexectl, RISCV_ISA_VENDOR_EXT_XMIPSEXECTL), +}; + +struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_mips = { + .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_mips), + .ext_data = riscv_isa_vendor_ext_mips, +}; diff --git a/arch/riscv/kernel/vendor_extensions/mips_hwprobe.c b/arch/riscv/kernel/vendor_extensions/mips_hwprobe.c new file mode 100644 index 000000000000..dc213a2ca70d --- /dev/null +++ b/arch/riscv/kernel/vendor_extensions/mips_hwprobe.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 MIPS. 
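
hwprobe_isa_vendor_ext_mips_0(), defined just below, feeds the RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 case added to sys_hwprobe.c above, so userspace can detect xmipsexectl the same way it detects the T-Head and SiFive vendor extensions. A minimal sketch of such a query; the RISCV_HWPROBE_VENDOR_EXT_XMIPSEXECTL bit name is assumed from the uapi header added elsewhere in this series (<asm/vendor/mips.h>), the rest is the regular riscv_hwprobe() interface:

	#include <asm/hwprobe.h>
	#include <asm/vendor/mips.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		struct riscv_hwprobe pair = {
			.key = RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0,
		};

		/* cpusetsize == 0 / cpus == NULL: ask about all online CPUs */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 1;

		if (pair.value & RISCV_HWPROBE_VENDOR_EXT_XMIPSEXECTL)
			printf("xmipsexectl is available on all CPUs\n");

		return 0;
	}
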
+ */ + +#include <asm/vendor_extensions.h> +#include <asm/vendor_extensions/mips.h> +#include <asm/vendor_extensions/mips_hwprobe.h> +#include <asm/vendor_extensions/vendor_hwprobe.h> + +#include <linux/cpumask.h> +#include <linux/types.h> + +#include <uapi/asm/hwprobe.h> +#include <uapi/asm/vendor/mips.h> + +void hwprobe_isa_vendor_ext_mips_0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + VENDOR_EXTENSION_SUPPORTED(pair, cpus, + riscv_isa_vendor_ext_list_mips.per_hart_isa_bitmap, + { VENDOR_EXT_KEY(XMIPSEXECTL); }); +} diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c index 97dec18e6989..de1f96ea6225 100644 --- a/arch/riscv/kvm/vcpu_insn.c +++ b/arch/riscv/kvm/vcpu_insn.c @@ -8,133 +8,7 @@ #include <linux/kvm_host.h> #include <asm/cpufeature.h> - -#define INSN_OPCODE_MASK 0x007c -#define INSN_OPCODE_SHIFT 2 -#define INSN_OPCODE_SYSTEM 28 - -#define INSN_MASK_WFI 0xffffffff -#define INSN_MATCH_WFI 0x10500073 - -#define INSN_MASK_WRS 0xffffffff -#define INSN_MATCH_WRS 0x00d00073 - -#define INSN_MATCH_CSRRW 0x1073 -#define INSN_MASK_CSRRW 0x707f -#define INSN_MATCH_CSRRS 0x2073 -#define INSN_MASK_CSRRS 0x707f -#define INSN_MATCH_CSRRC 0x3073 -#define INSN_MASK_CSRRC 0x707f -#define INSN_MATCH_CSRRWI 0x5073 -#define INSN_MASK_CSRRWI 0x707f -#define INSN_MATCH_CSRRSI 0x6073 -#define INSN_MASK_CSRRSI 0x707f -#define INSN_MATCH_CSRRCI 0x7073 -#define INSN_MASK_CSRRCI 0x707f - -#define INSN_MATCH_LB 0x3 -#define INSN_MASK_LB 0x707f -#define INSN_MATCH_LH 0x1003 -#define INSN_MASK_LH 0x707f -#define INSN_MATCH_LW 0x2003 -#define INSN_MASK_LW 0x707f -#define INSN_MATCH_LD 0x3003 -#define INSN_MASK_LD 0x707f -#define INSN_MATCH_LBU 0x4003 -#define INSN_MASK_LBU 0x707f -#define INSN_MATCH_LHU 0x5003 -#define INSN_MASK_LHU 0x707f -#define INSN_MATCH_LWU 0x6003 -#define INSN_MASK_LWU 0x707f -#define INSN_MATCH_SB 0x23 -#define INSN_MASK_SB 0x707f -#define INSN_MATCH_SH 0x1023 -#define INSN_MASK_SH 0x707f -#define INSN_MATCH_SW 0x2023 -#define INSN_MASK_SW 0x707f -#define INSN_MATCH_SD 0x3023 -#define INSN_MASK_SD 0x707f - -#define INSN_MATCH_C_LD 0x6000 -#define INSN_MASK_C_LD 0xe003 -#define INSN_MATCH_C_SD 0xe000 -#define INSN_MASK_C_SD 0xe003 -#define INSN_MATCH_C_LW 0x4000 -#define INSN_MASK_C_LW 0xe003 -#define INSN_MATCH_C_SW 0xc000 -#define INSN_MASK_C_SW 0xe003 -#define INSN_MATCH_C_LDSP 0x6002 -#define INSN_MASK_C_LDSP 0xe003 -#define INSN_MATCH_C_SDSP 0xe002 -#define INSN_MASK_C_SDSP 0xe003 -#define INSN_MATCH_C_LWSP 0x4002 -#define INSN_MASK_C_LWSP 0xe003 -#define INSN_MATCH_C_SWSP 0xc002 -#define INSN_MASK_C_SWSP 0xe003 - -#define INSN_16BIT_MASK 0x3 - -#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK) - -#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 
2 : 4) - -#ifdef CONFIG_64BIT -#define LOG_REGBYTES 3 -#else -#define LOG_REGBYTES 2 -#endif -#define REGBYTES (1 << LOG_REGBYTES) - -#define SH_RD 7 -#define SH_RS1 15 -#define SH_RS2 20 -#define SH_RS2C 2 -#define MASK_RX 0x1f - -#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) -#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \ - (RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 5, 1) << 6)) -#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 5, 2) << 6)) -#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \ - (RV_X(x, 12, 1) << 5) | \ - (RV_X(x, 2, 2) << 6)) -#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \ - (RV_X(x, 12, 1) << 5) | \ - (RV_X(x, 2, 3) << 6)) -#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \ - (RV_X(x, 7, 2) << 6)) -#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \ - (RV_X(x, 7, 3) << 6)) -#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3)) -#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3)) -#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5) - -#define SHIFT_RIGHT(x, y) \ - ((y) < 0 ? ((x) << -(y)) : ((x) >> (y))) - -#define REG_MASK \ - ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES)) - -#define REG_OFFSET(insn, pos) \ - (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK) - -#define REG_PTR(insn, pos, regs) \ - ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))) - -#define GET_FUNCT3(insn) (((insn) >> 12) & 7) - -#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs)) -#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs)) -#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs)) -#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs)) -#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs)) -#define GET_SP(regs) (*REG_PTR(2, 0, regs)) -#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val)) -#define IMM_I(insn) ((s32)(insn) >> 20) -#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \ - (s32)(((insn) >> 7) & 0x1f)) +#include <asm/insn.h> struct insn_func { unsigned long mask; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 15683ae13fa5..6091f3f06fa3 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -23,6 +23,7 @@ #include <linux/kfence.h> #include <linux/execmem.h> +#include <asm/alternative.h> #include <asm/fixmap.h> #include <asm/io.h> #include <asm/kasan.h> @@ -816,6 +817,7 @@ static __meminit pgprot_t pgprot_from_va(uintptr_t va) #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa); +u64 __pi_set_satp_mode_from_fdt(uintptr_t dtb_pa); static void __init disable_pgtable_l5(void) { @@ -855,18 +857,22 @@ static void __init set_mmap_rnd_bits_max(void) * underlying hardware: establish 1:1 mapping in 4-level page table mode * then read SATP to see if the configuration was taken into account * meaning sv48 is supported. + * The maximum SATP mode is limited by both the command line and the "mmu-type" + * property in the device tree, since some platforms may hang if an unsupported + * SATP mode is attempted. 
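
Concretely, both limits are expressed as SATP mode constants, and the privileged spec encodes stricter modes with smaller mode-field values (8 for Sv39, 9 for Sv48, 10 for Sv57), so taking the smaller non-zero value yields the stricter cap, for example:

	"no5lvl" on the command line    -> SATP_MODE_48
	mmu-type = "riscv,sv39" in DT   -> SATP_MODE_39
	min_not_zero(...)               -> SATP_MODE_39 (boot stays at 3-level Sv39)

A zero from either source means "no restriction from that side" and is ignored by min_not_zero().
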
*/ static __init void set_satp_mode(uintptr_t dtb_pa) { u64 identity_satp, hw_satp; uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK; - u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa); + u64 satp_mode_limit = min_not_zero(__pi_set_satp_mode_from_cmdline(dtb_pa), + __pi_set_satp_mode_from_fdt(dtb_pa)); kernel_map.page_offset = PAGE_OFFSET_L5; - if (satp_mode_cmdline == SATP_MODE_57) { + if (satp_mode_limit == SATP_MODE_48) { disable_pgtable_l5(); - } else if (satp_mode_cmdline == SATP_MODE_48) { + } else if (satp_mode_limit == SATP_MODE_39) { disable_pgtable_l5(); disable_pgtable_l4(); return; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 9883a55d61b5..206b2b8552a7 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -18,7 +18,7 @@ #define RV_MAX_REG_ARGS 8 #define RV_FENTRY_NINSNS 2 #define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4) -#define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI_CLANG) ? 1 : 0) +#define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI) ? 1 : 0) /* imm that allows emit_imm to emit max count insns */ #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF @@ -469,7 +469,7 @@ static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx) static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx) { - if (IS_ENABLED(CONFIG_CFI_CLANG)) + if (IS_ENABLED(CONFIG_CFI)) emit(hash, ctx); } diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile index 240592e3f5c2..530e497ca2f9 100644 --- a/arch/riscv/purgatory/Makefile +++ b/arch/riscv/purgatory/Makefile @@ -71,7 +71,7 @@ ifdef CONFIG_STACKPROTECTOR_STRONG PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong endif -ifdef CONFIG_CFI_CLANG +ifdef CONFIG_CFI PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_CFI) endif |
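
The CONFIG_CFI_CLANG to CONFIG_CFI renames above are mechanical, following the generalization of the kernel's control-flow-integrity option beyond Clang, but the BPF JIT hunk is easier to read with the emitted layout in mind. A rough sketch of what emit_kcfi() contributes to a JITed image; the ordering is inferred from the constants in this file rather than spelled out by the diff:

	/*
	 *   +---------------------+
	 *   | kCFI type hash      |  one 32-bit word, only with CONFIG_CFI=y,
	 *   |                     |  hence RV_KCFI_NINSNS = 1 : 0
	 *   +---------------------+
	 *   | fentry patch site   |  RV_FENTRY_NINSNS = 2 instructions
	 *   +---------------------+
	 *   | prologue and body   |  indirect calls land here; a kCFI-
	 *   +---------------------+  instrumented caller compares the hash word
	 *                            placed ahead of the entry point against the
	 *                            hash expected for the call's function type
	 *                            before branching.
	 */
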