Diffstat:
 arch/x86/include/asm/smap.h | 49 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 4f84d421d1cf..977bef14a0ab 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -23,24 +23,61 @@
#else /* __ASSEMBLER__ */
+/*
+ * The CLAC/STAC instructions toggle EFLAGS.AC, which gates the
+ * enforcement of both X86_FEATURE_SMAP and X86_FEATURE_LASS.
+ *
+ * SMAP enforcement is based on the _PAGE_BIT_USER bit in the page
+ * tables. The kernel is not allowed to touch pages with that bit set
+ * unless the AC bit is set.
+ *
+ * Use stac()/clac() when accessing userspace (_PAGE_USER) mappings,
+ * regardless of the virtual address at which they are mapped.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
+
static __always_inline void clac(void)
{
- /* Note: a barrier is implicit in alternative() */
alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
- /* Note: a barrier is implicit in alternative() */
alternative("", "stac", X86_FEATURE_SMAP);
}
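
For context, a minimal sketch of the usage pattern the comment above describes. The helper and its name are hypothetical, not part of this patch; real kernel code goes through get_user()/copy_from_user(), which wrap this pattern together with the required exception handling:

	static unsigned char peek_user_byte(const unsigned char __user *uptr)
	{
		unsigned char val;

		stac();		/* EFLAGS.AC = 1: user accesses permitted */
		val = *(const unsigned char __force *)uptr;
		clac();		/* EFLAGS.AC = 0: SMAP/LASS enforced again */

		return val;
	}
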
+/*
+ * LASS enforcement is based on bit 63 of the virtual address. The
+ * kernel is not allowed to touch memory in the lower half of the
+ * virtual address space.
+ *
+ * Use lass_stac()/lass_clac() to toggle the AC bit for kernel data
+ * accesses (!_PAGE_USER) that are blocked by LASS, but not by SMAP.
+ *
+ * Even with the AC bit set, LASS will continue to block instruction
+ * fetches from the user half of the address space. To allow those,
+ * clear CR4.LASS to disable the LASS mechanism entirely.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
+
+static __always_inline void lass_clac(void)
+{
+ alternative("", "clac", X86_FEATURE_LASS);
+}
+
+static __always_inline void lass_stac(void)
+{
+ alternative("", "stac", X86_FEATURE_LASS);
+}
+
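
By way of illustration, a hypothetical access (not in this patch) that SMAP ignores but LASS would fault on: a kernel (!_PAGE_USER) mapping that sits at a low, positive address, such as an identity-mapped low-memory buffer:

	static u32 read_low_identity_word(const u32 *low_addr)
	{
		u32 val;

		lass_stac();	/* AC = 1: LASS address check suspended */
		val = *low_addr;	/* kernel PTE, lower-half address */
		lass_clac();	/* AC = 0: LASS enforced again */

		return val;
	}
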
static __always_inline unsigned long smap_save(void)
{
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
"", "pushf; pop %0; clac",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -51,7 +88,7 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
"", "push %0; popf",
X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
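
A sketch of the save/restore pattern with a hypothetical caller: code that can be reached with AC in either state saves the current EFLAGS, runs with SMAP enforced, then restores whatever state it found:

	static void run_with_smap_enforced(void)
	{
		unsigned long flags = smap_save();	/* pushf; pop; clac */

		/* ... no user accesses permitted in here ... */

		smap_restore(flags);	/* push; popf: AC back as it was */
	}
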
@@ -64,9 +101,9 @@ static __always_inline void smap_restore(unsigned long flags)
ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
- ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "clac", X86_FEATURE_SMAP)
#define ASM_STAC_UNSAFE \
- ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLER__ */
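
The _UNSAFE variants are string fragments for inline asm in which objtool's tracking of the alternative is deliberately suppressed (that is what ANNOTATE_IGNORE_ALTERNATIVE emits). A hypothetical user-access window opened and closed within a single asm statement might look like:

	static unsigned char asm_peek_user_byte(const unsigned char __user *uptr)
	{
		unsigned char val;

		asm volatile(ASM_STAC_UNSAFE "\n\t"
			     "movb (%1), %0\n\t"
			     ASM_CLAC_UNSAFE
			     : "=q" (val)
			     : "r" (uptr)
			     : "memory");

		return val;
	}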