Diffstat (limited to 'arch/riscv/kernel/signal.c')
-rw-r--r-- | arch/riscv/kernel/signal.c | 148
1 file changed, 127 insertions, 21 deletions
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 08378fea3a11..1bcda11e0680 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -22,11 +22,13 @@
 #include <asm/vector.h>
 #include <asm/csr.h>
 #include <asm/cacheflush.h>
+#include <asm/usercfi.h>
 
 unsigned long signal_minsigstksz __ro_after_init;
 
 extern u32 __user_rt_sigreturn[2];
 static size_t riscv_v_sc_size __ro_after_init;
+static size_t riscv_zicfiss_sc_size __ro_after_init;
 
 #define DEBUG_SIG 0
@@ -68,18 +70,19 @@ static long save_fp_state(struct pt_regs *regs,
 #define restore_fp_state(task, regs) (0)
 #endif
 
-#ifdef CONFIG_RISCV_ISA_V
-
-static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
+static long save_v_state(struct pt_regs *regs, void __user *sc_vec)
 {
-	struct __riscv_ctx_hdr __user *hdr;
 	struct __sc_riscv_v_state __user *state;
 	void __user *datap;
 	long err;
 
-	hdr = *sc_vec;
-	/* Place state to the user's signal context space after the hdr */
-	state = (struct __sc_riscv_v_state __user *)(hdr + 1);
+	if (!IS_ENABLED(CONFIG_RISCV_ISA_V) ||
+	    !((has_vector() || has_xtheadvector()) &&
+	      riscv_v_vstate_query(regs)))
+		return 0;
+
+	/* Place state to the user's signal context spac */
+	state = (struct __sc_riscv_v_state __user *)sc_vec;
 	/* Point datap right after the end of __sc_riscv_v_state */
 	datap = state + 1;
 
@@ -97,15 +100,11 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
 	err |= __put_user((__force void *)datap, &state->v_state.datap);
 	/* Copy the whole vector content to user space datap. */
 	err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
-	/* Copy magic to the user space after saving all vector conetext */
-	err |= __put_user(RISCV_V_MAGIC, &hdr->magic);
-	err |= __put_user(riscv_v_sc_size, &hdr->size);
 	if (unlikely(err))
-		return err;
+		return -EFAULT;
 
-	/* Only progress the sv_vec if everything has done successfully */
-	*sc_vec += riscv_v_sc_size;
-	return 0;
+	/* Only return the size if everything has done successfully */
+	return riscv_v_sc_size;
 }
 
 /*
@@ -142,10 +141,80 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
 	 */
 	return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
 }
-#else
-#define save_v_state(task, regs) (0)
-#define __restore_v_state(task, regs) (0)
-#endif
+
+static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+	struct __sc_riscv_cfi_state __user *state = sc_cfi;
+	unsigned long ss_ptr = 0;
+	long err = 0;
+
+	if (!is_shstk_enabled(current))
+		return 0;
+
+	/*
+	 * Save a pointer to shadow stack itself on shadow stack as a form of token.
+	 * A token on shadow gives following properties
+	 * - Safe save and restore for shadow stack switching. Any save of shadow stack
+	 *   must have had saved a token on shadow stack. Similarly any restore of shadow
+	 *   stack must check the token before restore. Since writing to shadow stack with
+	 *   address of shadow stack itself is not easily allowed. A restore without a save
+	 *   is quite difficult for an attacker to perform.
+	 * - A natural break. A token in shadow stack provides a natural break in shadow stack
+	 *   So a single linear range can be bucketed into different shadow stack segments. Any
+	 *   sspopchk will detect the condition and fault to kernel as sw check exception.
+	 */
+	err |= save_user_shstk(current, &ss_ptr);
+	err |= __put_user(ss_ptr, &state->ss_ptr);
+	if (unlikely(err))
+		return -EFAULT;
+
+	return riscv_zicfiss_sc_size;
+}
+
+static long __restore_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
+{
+	struct __sc_riscv_cfi_state __user *state = sc_cfi;
+	unsigned long ss_ptr = 0;
+	long err;
+
+	/*
+	 * Restore shadow stack as a form of token stored on shadow stack itself as a safe
+	 * way to restore.
+	 * A token on shadow gives following properties
+	 * - Safe save and restore for shadow stack switching. Any save of shadow stack
+	 *   must have had saved a token on shadow stack. Similarly any restore of shadow
+	 *   stack must check the token before restore. Since writing to shadow stack with
+	 *   address of shadow stack itself is not easily allowed. A restore without a save
+	 *   is quite difficult for an attacker to perform.
+	 * - A natural break. A token in shadow stack provides a natural break in shadow stack
+	 *   So a single linear range can be bucketed into different shadow stack segments.
+	 *   sspopchk will detect the condition and fault to kernel as sw check exception.
+	 */
+	err = __copy_from_user(&ss_ptr, &state->ss_ptr, sizeof(unsigned long));
+
+	if (unlikely(err))
+		return err;
+
+	return restore_user_shstk(current, ss_ptr);
+}
+
+struct arch_ext_priv {
+	__u32 magic;
+	long (*save)(struct pt_regs *regs, void __user *sc_vec);
+};
+
+struct arch_ext_priv arch_ext_list[] = {
+	{
+		.magic = RISCV_V_MAGIC,
+		.save = &save_v_state,
+	},
+	{
+		.magic = RISCV_ZICFISS_MAGIC,
+		.save = &save_cfiss_state,
+	},
+};
+
+const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
 
 static long restore_sigcontext(struct pt_regs *regs,
 			       struct sigcontext __user *sc)
@@ -195,6 +264,12 @@ static long restore_sigcontext(struct pt_regs *regs,
 			err = __restore_v_state(regs, sc_ext_ptr);
 			break;
+		case RISCV_ZICFISS_MAGIC:
+			if (!is_shstk_enabled(current) || size != riscv_zicfiss_sc_size)
+				return -EINVAL;
+
+			err = __restore_cfiss_state(regs, sc_ext_ptr);
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -216,6 +291,16 @@ static size_t get_rt_frame_size(bool cal_all)
 			total_context_size += riscv_v_sc_size;
 	}
 
+	if (is_shstk_enabled(current))
+		total_context_size += riscv_zicfiss_sc_size;
+
+	/*
+	 * Preserved a __riscv_ctx_hdr for END signal context header if an
+	 * extension uses __riscv_extra_ext_header
+	 */
+	if (total_context_size)
+		total_context_size += sizeof(struct __riscv_ctx_hdr);
+
 	frame_size += total_context_size;
 
 	frame_size = round_up(frame_size, 16);
@@ -270,7 +355,8 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
 {
 	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
 	struct __riscv_ctx_hdr __user *sc_ext_ptr = &sc->sc_extdesc.hdr;
-	long err;
+	struct arch_ext_priv *arch_ext;
+	long err, i, ext_size;
 
 	/* sc_regs is structured the same as the start of pt_regs */
 	err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
@@ -278,8 +364,20 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
 	if (has_fpu())
 		err |= save_fp_state(regs, &sc->sc_fpregs);
 	/* Save the vector state. */
-	if ((has_vector() || has_xtheadvector()) && riscv_v_vstate_query(regs))
-		err |= save_v_state(regs, (void __user **)&sc_ext_ptr);
+	for (i = 0; i < nr_arch_exts; i++) {
+		arch_ext = &arch_ext_list[i];
+		if (!arch_ext->save)
+			continue;
+
+		ext_size = arch_ext->save(regs, sc_ext_ptr + 1);
+		if (ext_size <= 0) {
+			err |= ext_size;
+		} else {
+			err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic);
+			err |= __put_user(ext_size, &sc_ext_ptr->size);
+			sc_ext_ptr = (void *)sc_ext_ptr + ext_size;
+		}
+	}
 	/* Write zero to fp-reserved space and check it on restore_sigcontext */
 	err |= __put_user(0, &sc->sc_extdesc.reserved);
 	/* And put END __riscv_ctx_hdr at the end. */
@@ -339,6 +437,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 #ifdef CONFIG_MMU
 	regs->ra = (unsigned long)VDSO_SYMBOL(
 		current->mm->context.vdso, rt_sigreturn);
+
+	/* if bcfi is enabled x1 (ra) and x5 (t0) must match. not sure if we need this? */
+	if (is_shstk_enabled(current))
+		regs->t0 = regs->ra;
+
 #else
 	/*
 	 * For the nommu case we don't have a VDSO. Instead we push two
@@ -467,6 +570,9 @@ void __init init_rt_signal_env(void)
 {
 	riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) +
 			  sizeof(struct __sc_riscv_v_state) + riscv_v_vsize;
+
+	riscv_zicfiss_sc_size = sizeof(struct __riscv_ctx_hdr) +
+				sizeof(struct __sc_riscv_cfi_state);
 	/*
 	 * Determine the stack space required for guaranteed signal delivery.
 	 * The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry
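
For context, the core of the change is the table-driven save path in setup_sigcontext(): each arch_ext_list entry's save() callback writes its payload just past the current __riscv_ctx_hdr slot and returns the total record size (0 when the extension is inactive, negative on fault), and the caller then fills in the magic/size header and advances the pointer. The stand-alone user-space sketch below mimics that flow under stated assumptions; the ctx_hdr and ext_desc types, the save_ext_a/save_ext_b helpers, and the magic values are illustrative stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for __riscv_ctx_hdr: a magic/size record header. */
struct ctx_hdr {
	uint32_t magic;
	uint32_t size;
};

/* Illustrative stand-in for struct arch_ext_priv. */
struct ext_desc {
	uint32_t magic;
	/* Returns total record size (header + payload), 0 if inactive, <0 on error. */
	long (*save)(void *payload);
};

static long save_ext_a(void *payload)
{
	memset(payload, 0xaa, 16);		/* pretend 16 bytes of extension state */
	return sizeof(struct ctx_hdr) + 16;	/* size includes the header, like riscv_v_sc_size */
}

static long save_ext_b(void *payload)
{
	(void)payload;
	return 0;				/* extension not enabled for this task */
}

static const struct ext_desc ext_list[] = {
	{ .magic = 0x41414141, .save = save_ext_a },	/* made-up magic values */
	{ .magic = 0x42424242, .save = save_ext_b },
};

/* Mirrors the loop in setup_sigcontext(): payload, then header, then advance; END last. */
static long save_all(uint8_t *buf)
{
	struct ctx_hdr *hdr = (struct ctx_hdr *)buf;

	for (size_t i = 0; i < sizeof(ext_list) / sizeof(ext_list[0]); i++) {
		long sz = ext_list[i].save(hdr + 1);	/* payload starts right after the header */

		if (sz < 0)
			return sz;			/* propagate the fault */
		if (sz == 0)
			continue;			/* inactive: no header, no payload */

		hdr->magic = ext_list[i].magic;
		hdr->size = (uint32_t)sz;		/* record size covers header + payload */
		hdr = (struct ctx_hdr *)((uint8_t *)hdr + sz);
	}

	/* Terminating END header: zero magic, zero size. */
	hdr->magic = 0;
	hdr->size = 0;
	return (uint8_t *)(hdr + 1) - buf;
}

int main(void)
{
	uint8_t frame[256] = { 0 };

	printf("extension area occupies %ld bytes\n", save_all(frame));
	return 0;
}

Two simplifications relative to the diff: the kernel accumulates failures with err |= ext_size instead of returning early, and the END header is emitted by the pre-existing code after the loop, which is why get_rt_frame_size() now reserves one extra __riscv_ctx_hdr whenever any extension contributes context.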