Diffstat (limited to 'arch/arm64/include/asm/gcs.h')
-rw-r--r--  arch/arm64/include/asm/gcs.h  91
1 file changed, 90 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index 5bc432234d3a..8fa0707069e8 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -21,7 +21,7 @@ static inline void gcsstr(u64 *addr, u64 val)
 	register u64 *_addr __asm__ ("x0") = addr;
 	register long _val __asm__ ("x1") = val;
 
-	/* GCSSTTR x1, x0 */
+	/* GCSSTTR x1, [x0] */
 	asm volatile(
 	".inst 0xd91f1c01\n"
 	:
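
[Editor's note: gcsstr() above, like the gcssttr() added below, emits the store as a raw .inst word so the header assembles even with binutils that predate FEAT_GCS; the mnemonic only ever appears in the comment, which is why the patch corrects it to the architecturally accurate GCSSTTR x1, [x0] form. As a minimal sketch, assuming a GCS-aware assembler (the -march string is illustrative, not from this patch), the same store could be written mnemonically:

	/*
	 * Sketch only, not part of the patch: needs an assembler that
	 * accepts the GCS extension (e.g. -march=armv9.4-a+gcs).
	 * GCSSTTR is the unprivileged GCS store, so the access is
	 * checked against the GCS permission of the page at x0.
	 */
	asm volatile("gcssttr x1, [x0]"
		     :
		     : "rZ" (_val), "r" (_addr)
		     : "memory");
]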
@@ -81,6 +81,82 @@ static inline int gcs_check_locked(struct task_struct *task,
 	return 0;
 }
 
+static inline int gcssttr(unsigned long __user *addr, unsigned long val)
+{
+	register unsigned long __user *_addr __asm__ ("x0") = addr;
+	register unsigned long _val __asm__ ("x1") = val;
+	int err = 0;
+
+	/* GCSSTTR x1, [x0] */
+	asm volatile(
+	"1: .inst 0xd91f1c01\n"
+	"2: \n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
+	: "+r" (err)
+	: "rZ" (_val), "r" (_addr)
+	: "memory");
+
+	return err;
+}
+
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+				int *err)
+{
+	int ret;
+
+	if (!access_ok((char __user *)addr, sizeof(u64))) {
+		*err = -EFAULT;
+		return;
+	}
+
+	uaccess_ttbr0_enable();
+	ret = gcssttr(addr, val);
+	if (ret != 0)
+		*err = ret;
+	uaccess_ttbr0_disable();
+}
+
+static inline void push_user_gcs(unsigned long val, int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+
+	gcspr -= sizeof(u64);
+	put_user_gcs(val, (unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr, SYS_GCSPR_EL0);
+}
+
+/*
+ * Unlike put/push_user_gcs() above, get/pop_user_gcs() do not
+ * validate that the GCS permission is set on the page being read.
+ * This differs from how the hardware behaves when it consumes data
+ * stored at GCSPR. Callers should ensure this is acceptable.
+ */
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+	unsigned long ret;
+	u64 load = 0;
+
+	/* Ensure previous GCS operations are visible before we read the page */
+	gcsb_dsync();
+	ret = copy_from_user(&load, addr, sizeof(load));
+	if (ret != 0)
+		*err = ret;
+	return load;
+}
+
+static inline u64 pop_user_gcs(int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+	u64 read_val;
+
+	read_val = get_user_gcs((__force unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);
+
+	return read_val;
+}
+
 #else
 
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
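
[Editor's note: the four helpers split along an architectural asymmetry. Stores to a GCS page must go through a GCS instruction, so put_user_gcs()/push_user_gcs() funnel into GCSSTTR and fail if the target page lacks GCS permission, while ordinary loads from a GCS page are permitted, so get_user_gcs()/pop_user_gcs() can use a plain copy_from_user() after the GCSB DSYNC barrier, with the caveat spelled out in the comment above get_user_gcs(). A minimal caller sketch, assuming GCS is enabled for the task and GCSPR_EL0 points at a valid shadow stack (the function below is hypothetical, not from this patch):

	/* Editor's sketch: round-trip one value through the shadow stack. */
	static int gcs_roundtrip_example(unsigned long token)
	{
		int err = 0;
		u64 val;

		/* GCSSTTR store: moves GCSPR_EL0 down one slot on
		 * success, sets err (e.g. -EFAULT) on failure. */
		push_user_gcs(token, &err);
		if (err)
			return err;

		/* Plain load, no GCS permission check; moves GCSPR_EL0
		 * back up one slot on success. */
		val = pop_user_gcs(&err);
		if (err)
			return err;

		return val == token ? 0 : -EIO;
	}

The int *err out-parameter follows the accumulate-then-check style used elsewhere in the arm64 signal code: callers initialise err to zero and test it once after a batch of accesses.]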
@@ -91,6 +167,10 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
 static inline void gcs_set_el0_mode(struct task_struct *task) { }
 static inline void gcs_free(struct task_struct *task) { }
 static inline void gcs_preserve_current_state(void) { }
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+				int *err) { }
+static inline void push_user_gcs(unsigned long val, int *err) { }
+
 static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 						   const struct kernel_clone_args *args)
 {
@@ -101,6 +181,15 @@ static inline int gcs_check_locked(struct task_struct *task,
 {
 	return 0;
 }
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+	*err = -EFAULT;
+	return 0;
+}
+static inline u64 pop_user_gcs(int *err)
+{
+	return 0;
+}
 
 #endif
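
[Editor's note: the !CONFIG_ARM64_GCS stubs keep callers free of #ifdefs. The store-side helpers compile to empty functions and get_user_gcs() reports -EFAULT, while pop_user_gcs() leaves *err untouched, so callers are expected to initialise err to zero and reach these paths only when GCS is actually in use. A sketch of the unconditional calling pattern this enables (hypothetical caller, not from this patch):

	/*
	 * Editor's sketch: task_gcs_el0_enabled() is stubbed out in the
	 * !CONFIG_ARM64_GCS branch above (returning false there), so on
	 * such kernels the push is never attempted and this function
	 * compiles to almost nothing.
	 */
	static void example_push_return_addr(unsigned long retaddr)
	{
		int err = 0;

		if (task_gcs_el0_enabled(current))
			push_user_gcs(retaddr, &err);

		if (err)
			force_sig(SIGSEGV);	/* e.g. GCS fault */
	}
]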