Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/common.c       |  22
-rw-r--r--  mm/kasan/generic.c      |  19
-rw-r--r--  mm/kasan/hw_tags.c      |  54
-rw-r--r--  mm/kasan/init.c         |  16
-rw-r--r--  mm/kasan/kasan.h        |  15
-rw-r--r--  mm/kasan/kasan_test_c.c | 249
-rw-r--r--  mm/kasan/shadow.c       |  65
-rw-r--r--  mm/kasan/sw_tags.c      |   1
-rw-r--r--  mm/kasan/tags.c         |   2
9 files changed, 322 insertions, 121 deletions
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 9142964ab9c9..d4c14359feaf 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -32,6 +32,15 @@
#include "kasan.h"
#include "../slab.h"
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Definition of the unified static key declared in kasan-enabled.h.
+ * This provides consistent runtime enable/disable across KASAN modes.
+ */
+DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
+EXPORT_SYMBOL_GPL(kasan_flag_enabled);
+#endif
+
struct slab *kasan_addr_to_slab(const void *addr)
{
if (virt_addr_valid(addr))
@@ -246,15 +255,15 @@ static inline void poison_slab_object(struct kmem_cache *cache, void *object,
bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
unsigned long ip)
{
- if (!kasan_arch_is_ready() || is_kfence_address(object))
+ if (is_kfence_address(object))
return false;
return check_slab_allocation(cache, object, ip);
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
- bool still_accessible)
+ bool still_accessible, bool no_quarantine)
{
- if (!kasan_arch_is_ready() || is_kfence_address(object))
+ if (is_kfence_address(object))
return false;
/*
@@ -274,6 +283,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
poison_slab_object(cache, object, init);
+ if (no_quarantine)
+ return false;
+
/*
* If the object is put into quarantine, do not let slab put the object
* onto the freelist for now. The object's metadata is kept until the
@@ -293,7 +305,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return false;
if (ptr != page_address(virt_to_head_page(ptr))) {
@@ -522,7 +534,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
return true;
}
- if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+ if (is_kfence_address(ptr))
return true;
slab = folio_slab(folio);
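Note on the switch from kasan_arch_is_ready() to kasan_enabled(): both paths now resolve to the unified static key defined above. A minimal sketch of how the helpers in <linux/kasan-enabled.h> are presumably shaped, inferred from the DEFINE_STATIC_KEY_FALSE() in this hunk (the header itself is not part of this diff):

#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	/* Patched out at runtime once kasan_enable() flips the key. */
	return static_branch_likely(&kasan_flag_enabled);
}

static inline void kasan_enable(void)
{
	static_branch_enable(&kasan_flag_enabled);
}
#else
/* Architectures that do not defer KASAN are ready from the start. */
static __always_inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}
#endif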
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index d54e89f8c3e7..b413c46b3e04 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -37,6 +37,17 @@
#include "../slab.h"
/*
+ * Initialize Generic KASAN and enable runtime checks.
+ * This should be called from arch kasan_init() once shadow memory is ready.
+ */
+void __init kasan_init_generic(void)
+{
+ kasan_enable();
+
+ pr_info("KernelAddressSanitizer initialized (generic)\n");
+}
+
+/*
 * All functions below are always inlined so the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
@@ -165,7 +176,7 @@ static __always_inline bool check_region_inline(const void *addr,
size_t size, bool write,
unsigned long ret_ip)
{
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return true;
if (unlikely(size == 0))
@@ -193,7 +204,7 @@ bool kasan_byte_accessible(const void *addr)
{
s8 shadow_byte;
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return true;
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
@@ -495,7 +506,7 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return;
/* Check if free meta is valid. */
@@ -562,7 +573,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
kasan_save_track(&alloc_meta->alloc_track, flags);
}
-void kasan_save_free_info(struct kmem_cache *cache, void *object)
+void __kasan_save_free_info(struct kmem_cache *cache, void *object)
{
struct kasan_free_meta *free_meta;
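kasan_init_generic() replaces the old pattern where each architecture flipped its own readiness flag. A hypothetical arch-side call site (kasan_populate_shadow() is an illustrative name, not taken from this diff):

void __init kasan_init(void)
{
	/* Arch-specific: map and zero the shadow region first. */
	kasan_populate_shadow();

	/* Flips kasan_flag_enabled and prints the banner. */
	kasan_init_generic();
}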
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 9a6927394b54..1c373cc4b3fa 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -46,13 +46,6 @@ static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
/*
- * Whether KASAN is enabled at all.
- * The value remains false until KASAN is initialized by kasan_init_hw_tags().
- */
-DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
-EXPORT_SYMBOL(kasan_flag_enabled);
-
-/*
* Whether the selected mode is synchronous, asynchronous, or asymmetric.
* Defaults to KASAN_MODE_SYNC.
*/
@@ -67,6 +60,9 @@ DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
#endif
EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);
+/* Whether to check write accesses only. */
+static bool kasan_flag_write_only = false;
+
#define PAGE_ALLOC_SAMPLE_DEFAULT 1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
@@ -141,6 +137,23 @@ static int __init early_kasan_flag_vmalloc(char *arg)
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);
+/* kasan.write_only=off/on */
+static int __init early_kasan_flag_write_only(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (!strcmp(arg, "off"))
+ kasan_flag_write_only = false;
+ else if (!strcmp(arg, "on"))
+ kasan_flag_write_only = true;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+early_param("kasan.write_only", early_kasan_flag_write_only);
+
static inline const char *kasan_mode_info(void)
{
if (kasan_mode == KASAN_MODE_ASYNC)
@@ -260,12 +273,13 @@ void __init kasan_init_hw_tags(void)
kasan_init_tags();
/* KASAN is now initialized, enable it. */
- static_branch_enable(&kasan_flag_enabled);
+ kasan_enable();
- pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
+ pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s, write_only=%s)\n",
kasan_mode_info(),
str_on_off(kasan_vmalloc_enabled()),
- str_on_off(kasan_stack_collection_enabled()));
+ str_on_off(kasan_stack_collection_enabled()),
+ str_on_off(kasan_flag_write_only));
}
#ifdef CONFIG_KASAN_VMALLOC
@@ -392,6 +406,20 @@ void kasan_enable_hw_tags(void)
hw_enable_tag_checks_asymm();
else
hw_enable_tag_checks_sync();
+
+ /*
+ * The system can only be in one of two states:
+ * - All CPUs support the write_only feature
+ * - No CPUs support the write_only feature
+ *
+ * If the first CPU attempts hw_enable_tag_checks_write_only() and
+ * finds the feature unsupported, kasan_flag_write_only is set to OFF
+ * to avoid further unnecessary calls on other CPUs.
+ */
+ if (kasan_flag_write_only && hw_enable_tag_checks_write_only()) {
+ kasan_flag_write_only = false;
+ pr_err_once("write-only mode is not supported and thus not enabled\n");
+ }
}
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
@@ -404,4 +432,10 @@ VISIBLE_IF_KUNIT void kasan_force_async_fault(void)
}
EXPORT_SYMBOL_IF_KUNIT(kasan_force_async_fault);
+VISIBLE_IF_KUNIT bool kasan_write_only_enabled(void)
+{
+ return kasan_flag_write_only;
+}
+EXPORT_SYMBOL_IF_KUNIT(kasan_write_only_enabled);
+
#endif
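The new mode is selected at boot, e.g. with kasan.write_only=on on hardware that supports it. Under write-only checking, only stores to bad memory are reported; loads pass silently. A sketch of the resulting behavior (illustrative function, not part of the patch):

static void write_only_demo(void)
{
	char *p = kmalloc(64, GFP_KERNEL);

	kfree(p);
	READ_ONCE(p[0]);	/* use-after-free read: not reported in write-only mode */
	WRITE_ONCE(p[0], 'x');	/* use-after-free write: still reported */
}

This asymmetry is why the test changes below relax read-direction expectations via KUNIT_EXPECT_KASAN_FAIL_READ.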
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ced6b29fcf76..f084e7a5df1e 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -13,9 +13,9 @@
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
+#include <linux/pgalloc.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include "kasan.h"
@@ -191,7 +191,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pud_t *pud;
pmd_t *pmd;
- p4d_populate(&init_mm, p4d,
+ p4d_populate_kernel(addr, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud,
@@ -212,7 +212,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
} else {
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
pud_init(p);
- p4d_populate(&init_mm, p4d, p);
+ p4d_populate_kernel(addr, p4d, p);
}
}
zero_pud_populate(p4d, addr, next);
@@ -251,10 +251,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
* puds, pmds, so pgd_populate() and pud_populate()
* are no-ops.
*/
- pgd_populate(&init_mm, pgd,
+ pgd_populate_kernel(addr, pgd,
lm_alias(kasan_early_shadow_p4d));
p4d = p4d_offset(pgd, addr);
- p4d_populate(&init_mm, p4d,
+ p4d_populate_kernel(addr, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud,
@@ -266,14 +266,12 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
}
if (pgd_none(*pgd)) {
- p4d_t *p;
if (slab_is_available()) {
- p = p4d_alloc(&init_mm, pgd, addr);
- if (!p)
+ if (!p4d_alloc(&init_mm, pgd, addr))
return -ENOMEM;
} else {
- pgd_populate(&init_mm, pgd,
+ pgd_populate_kernel(addr, pgd,
early_alloc(PAGE_SIZE, NUMA_NO_NODE));
}
}
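The p4d_populate_kernel()/pgd_populate_kernel() calls take the virtual address so kernel mappings can be kept in sync across page tables. A rough sketch of the shape such a helper presumably has in <linux/pgalloc.h> (an assumption; the header is not shown in this diff):

static inline void p4d_populate_kernel(unsigned long addr,
				       p4d_t *p4d, pud_t *pud)
{
	p4d_populate(&init_mm, p4d, pud);
	if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED)
		arch_sync_kernel_mappings(addr, addr);
}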
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 129178be5e64..07fa7375a848 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -398,7 +398,13 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack);
void kasan_save_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
-void kasan_save_free_info(struct kmem_cache *cache, void *object);
+
+void __kasan_save_free_info(struct kmem_cache *cache, void *object);
+static inline void kasan_save_free_info(struct kmem_cache *cache, void *object)
+{
+ if (kasan_enabled())
+ __kasan_save_free_info(cache, object);
+}
#ifdef CONFIG_KASAN_GENERIC
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
@@ -431,6 +437,7 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_suppress_tag_checks_start() arch_suppress_tag_checks_start()
#define hw_suppress_tag_checks_stop() arch_suppress_tag_checks_stop()
#define hw_force_async_tag_fault() arch_force_async_tag_fault()
+#define hw_enable_tag_checks_write_only() arch_enable_tag_checks_write_only()
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
@@ -451,11 +458,17 @@ void __init kasan_init_tags(void);
#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_force_async_fault(void);
+bool kasan_write_only_enabled(void);
#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
static inline void kasan_force_async_fault(void) { }
+static inline bool kasan_write_only_enabled(void)
+{
+ return false;
+}
+
#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
#ifdef CONFIG_KASAN_SW_TAGS
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 2aa12dfa427a..2cafca31b092 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -47,7 +47,7 @@ static struct {
* Some tests use these global variables to store return values from function
* calls that could otherwise be eliminated by the compiler as dead code.
*/
-static volatile void *kasan_ptr_result;
+static void *volatile kasan_ptr_result;
static volatile int kasan_int_result;
/* Probe for console output: obtains test_status lines of interest. */
@@ -94,11 +94,14 @@ static void kasan_test_exit(struct kunit *test)
}
/**
- * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
- * KASAN report; causes a KUnit test failure otherwise.
+ * KUNIT_EXPECT_KASAN_RESULT - checks whether the executed expression
+ * produces a KASAN report; causes a KUnit test failure when the result
+ * is different from @fail.
*
* @test: Currently executing KUnit test.
- * @expression: Expression that must produce a KASAN report.
+ * @expr: Expression to be tested.
+ * @expr_str: Expression to be tested encoded as a string.
+ * @fail: Whether the expression should produce a KASAN report.
*
* For hardware tag-based KASAN, when a synchronous tag fault happens, tag
* checking is auto-disabled. When this happens, this test handler reenables
@@ -110,25 +113,29 @@ static void kasan_test_exit(struct kunit *test)
* Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
* expression to prevent that.
*
- * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
+ * In between KUNIT_EXPECT_KASAN_RESULT checks, test_status.report_found is kept
* as false. This allows detecting KASAN reports that happen outside of the
* checks by asserting !test_status.report_found at the start of
- * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
+ * KUNIT_EXPECT_KASAN_RESULT and in kasan_test_exit.
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+#define KUNIT_EXPECT_KASAN_RESULT(test, expr, expr_str, fail) \
+do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
kasan_sync_fault_possible()) \
migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
barrier(); \
- expression; \
+ expr; \
barrier(); \
if (kasan_async_fault_possible()) \
kasan_force_async_fault(); \
- if (!READ_ONCE(test_status.report_found)) { \
- KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
- "expected in \"" #expression \
- "\", but none occurred"); \
+ if (READ_ONCE(test_status.report_found) != fail) { \
+ KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure" \
+ "%sexpected in \"" expr_str \
+ "\", but %soccurred", \
+ (fail ? " " : " not "), \
+ (test_status.report_found ? \
+ "" : "none ")); \
} \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
kasan_sync_fault_possible()) { \
@@ -141,6 +148,34 @@ static void kasan_test_exit(struct kunit *test)
WRITE_ONCE(test_status.async_fault, false); \
} while (0)
+/*
+ * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
+ * KASAN report; causes a KUnit test failure otherwise.
+ *
+ * @test: Currently executing KUnit test.
+ * @expr: Expression that must produce a KASAN report.
+ */
+#define KUNIT_EXPECT_KASAN_FAIL(test, expr) \
+ KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr, true)
+
+/*
+ * KUNIT_EXPECT_KASAN_FAIL_READ - check that the executed expression
+ * produces a KASAN report when the write-only mode is not enabled;
+ * causes a KUnit test failure otherwise.
+ *
+ * Note: At the moment, this macro does not check whether the produced
+ * KASAN report is a report about a bad read access. It is only intended
+ * for checking the write-only KASAN mode functionality without failing
+ * KASAN tests.
+ *
+ * @test: Currently executing KUnit test.
+ * @expr: Expression that must only produce a KASAN report
+ * when the write-only mode is not enabled.
+ */
+#define KUNIT_EXPECT_KASAN_FAIL_READ(test, expr) \
+ KUNIT_EXPECT_KASAN_RESULT(test, expr, #expr, \
+ !kasan_write_only_enabled()) \
+
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
if (!IS_ENABLED(config)) \
kunit_skip((test), "Test requires " #config "=y"); \
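As a usage illustration, a hypothetical test (not part of this patch) exercising a read-direction out-of-bounds with the new macro:

static void example_oob_read(struct kunit *test)
{
	char *ptr = kmalloc(64, GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	/* Reports unless write-only mode suppresses read checks. */
	KUNIT_EXPECT_KASAN_FAIL_READ(test,
		kasan_int_result = ((volatile char *)ptr)[64]);
	kfree(ptr);
}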
@@ -183,8 +218,8 @@ static void kmalloc_oob_right(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
/* Out-of-bounds access past the aligned kmalloc object. */
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
- ptr[size + KASAN_GRANULE_SIZE + 5]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] =
+ ptr[size + KASAN_GRANULE_SIZE + 5]);
kfree(ptr);
}
@@ -198,7 +233,7 @@ static void kmalloc_oob_left(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr = *(ptr - 1));
kfree(ptr);
}
@@ -211,7 +246,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
kfree(ptr);
}
@@ -291,7 +326,7 @@ static void kmalloc_large_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_large_invalid_free(struct kunit *test)
@@ -323,7 +358,7 @@ static void page_alloc_oob_right(struct kunit *test)
ptr = page_address(pages);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ptr[0] = ptr[size]);
free_pages((unsigned long)ptr, order);
}
@@ -338,7 +373,7 @@ static void page_alloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
free_pages((unsigned long)ptr, order);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
}
static void krealloc_more_oob_helper(struct kunit *test,
@@ -458,7 +493,7 @@ static void krealloc_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
KUNIT_ASSERT_NULL(test, ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, *(volatile char *)ptr1);
}
static void kmalloc_oob_16(struct kunit *test)
@@ -501,7 +536,7 @@ static void kmalloc_uaf_16(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
kfree(ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, *ptr1 = *ptr2);
kfree(ptr1);
}
@@ -640,8 +675,8 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
memset((char *)ptr, 0, 64);
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(invalid_size);
- KUNIT_EXPECT_KASAN_FAIL(test,
- memmove((char *)ptr, (char *)ptr + 4, invalid_size));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
}
@@ -654,7 +689,7 @@ static void kmalloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[8]);
}
static void kmalloc_uaf_memset(struct kunit *test)
@@ -701,7 +736,7 @@ again:
goto again;
}
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[40]);
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
kfree(ptr2);
@@ -727,19 +762,19 @@ static void kmalloc_uaf3(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
kfree(ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr1)[8]);
}
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
int *i_unsafe = unsafe;
- KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*i_unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, smp_load_acquire(i_unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_read(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
@@ -752,18 +787,31 @@ static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
+ /*
+ * The result of the test below may vary due to garbage values of
+ * unsafe in write-only mode.
+ * Therefore, skip this test when KASAN is configured in write-only mode.
+ */
+ if (!kasan_write_only_enabled())
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+ /*
+ * The results of the tests below may vary due to garbage values of
+ * unsafe in write-only mode.
+ * Therefore, skip these tests when KASAN is configured in write-only mode.
+ */
+ if (!kasan_write_only_enabled()) {
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+ }
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, atomic_long_read(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
@@ -776,16 +824,29 @@ static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
+ /*
+ * The result of the test below may vary due to garbage values of
+ * unsafe in write-only mode.
+ * Therefore, skip this test when KASAN is configured in write-only mode.
+ */
+ if (!kasan_write_only_enabled())
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
- KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+ /*
+ * The results of the tests below may vary due to garbage values of
+ * unsafe in write-only mode.
+ * Therefore, skip these tests when KASAN is configured in write-only mode.
+ */
+ if (!kasan_write_only_enabled()) {
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+ }
}
static void kasan_atomics(struct kunit *test)
@@ -842,8 +903,8 @@ static void ksize_unpoisons_memory(struct kunit *test)
/* These must trigger a KASAN report. */
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size + 5]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[real_size - 1]);
kfree(ptr);
}
@@ -863,8 +924,8 @@ static void ksize_uaf(struct kunit *test)
OPTIMIZER_HIDE_VAR(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[size]);
}
/*
@@ -899,9 +960,9 @@ static void rcu_uaf(struct kunit *test)
global_rcu_ptr = rcu_dereference_protected(
(struct kasan_rcu_info __rcu *)ptr, NULL);
- KUNIT_EXPECT_KASAN_FAIL(test,
- call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
- rcu_barrier());
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
+ call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
+ rcu_barrier());
}
static void workqueue_uaf_work(struct work_struct *work)
@@ -924,8 +985,8 @@ static void workqueue_uaf(struct kunit *test)
queue_work(workqueue, work);
destroy_workqueue(workqueue);
- KUNIT_EXPECT_KASAN_FAIL(test,
- ((volatile struct work_struct *)work)->data);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
+ ((volatile struct work_struct *)work)->data);
}
static void kfree_via_page(struct kunit *test)
@@ -972,7 +1033,7 @@ static void kmem_cache_oob(struct kunit *test)
return;
}
- KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, *p = p[size + OOB_TAG_OFF]);
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
@@ -1068,11 +1129,50 @@ static void kmem_cache_rcu_uaf(struct kunit *test)
*/
rcu_barrier();
- KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, READ_ONCE(*p));
kmem_cache_destroy(cache);
}
+/*
+ * Check that SLAB_TYPESAFE_BY_RCU objects are immediately reused when
+ * CONFIG_SLUB_RCU_DEBUG is off, and stay at the same address.
+ * Without this, KASAN builds would be unable to trigger bugs caused by
+ * SLAB_TYPESAFE_BY_RCU users handling recycled objects improperly.
+ */
+static void kmem_cache_rcu_reuse(struct kunit *test)
+{
+ char *p, *p2;
+ struct kmem_cache *cache;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_SLUB_RCU_DEBUG);
+
+ cache = kmem_cache_create("test_cache", 16, 0, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ migrate_disable();
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ goto out;
+ }
+
+ kmem_cache_free(cache, p);
+ p2 = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p2) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ goto out;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, p, p2);
+
+ kmem_cache_free(cache, p2);
+
+out:
+ migrate_enable();
+ kmem_cache_destroy(cache);
+}
+
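For background, the reuse guarantee matters because SLAB_TYPESAFE_BY_RCU readers must revalidate objects: a freed object can be recycled at the same address within a grace period. A generic reader-side sketch (illustrative; struct obj and slot are hypothetical, not from this patch):

struct obj {
	refcount_t refcnt;
};

struct obj __rcu *slot;

static struct obj *get_obj(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(slot);
	/* Revalidate: the object may have been freed and recycled. */
	if (o && !refcount_inc_not_zero(&o->refcnt))
		o = NULL;
	rcu_read_unlock();
	return o;
}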
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
@@ -1207,7 +1307,7 @@ static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t
KUNIT_EXPECT_KASAN_FAIL(test,
((volatile char *)&elem[size])[0]);
else
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
mempool_free(elem, pool);
@@ -1273,7 +1373,7 @@ static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
mempool_free(elem, pool);
ptr = page ? page_address((struct page *)elem) : elem;
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)ptr)[0]);
}
static void mempool_kmalloc_uaf(struct kunit *test)
@@ -1532,7 +1632,7 @@ static void kasan_memchr(struct kunit *test)
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(size);
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
kasan_ptr_result = memchr(ptr, '1', size + 1));
kfree(ptr);
@@ -1559,7 +1659,7 @@ static void kasan_memcmp(struct kunit *test)
OPTIMIZER_HIDE_VAR(ptr);
OPTIMIZER_HIDE_VAR(size);
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
kasan_int_result = memcmp(ptr, arr, size+1));
kfree(ptr);
}
@@ -1578,9 +1678,11 @@ static void kasan_strings(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
+ OPTIMIZER_HIDE_VAR(src);
/*
* Make sure that strscpy() does not trigger KASAN if it overreads into
@@ -1594,7 +1696,7 @@ static void kasan_strings(struct kunit *test)
strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
/* strscpy should fail if the first byte is unreadable. */
- KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
KASAN_GRANULE_SIZE));
kfree(src);
@@ -1607,17 +1709,17 @@ static void kasan_strings(struct kunit *test)
* will likely point to zeroed byte.
*/
ptr += 16;
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strchr(ptr, '1'));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_ptr_result = strrchr(ptr, '1'));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strcmp(ptr, "2"));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strncmp(ptr, "2", 1));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strlen(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = strnlen(ptr, 1));
}
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
@@ -1636,12 +1738,18 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
- KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+ /*
+ * When KASAN is running in write-only mode, test_and_set_bit_lock()
+ * may skip the write when the (garbage) bit appears to be already set,
+ * so no fault occurs. Therefore, skip the test_and_set_bit_lock test
+ * in write-only mode.
+ */
+ if (!kasan_write_only_enabled())
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, kasan_int_result = test_bit(nr, addr));
if (nr < 7)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
xor_unlock_is_negative_byte(1 << nr, addr));
@@ -1765,7 +1873,7 @@ static void vmalloc_oob(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
/* An aligned access into the first out-of-bounds granule. */
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
+ KUNIT_EXPECT_KASAN_FAIL_READ(test, ((volatile char *)v_ptr)[size + 5]);
/* Check that in-bounds accesses to the physical page are valid. */
page = vmalloc_to_page(v_ptr);
@@ -2042,15 +2150,15 @@ static void copy_user_test_oob(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test,
unused = copy_from_user(kmem, usermem, size + 1));
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
unused = copy_to_user(usermem, kmem, size + 1));
KUNIT_EXPECT_KASAN_FAIL(test,
unused = __copy_from_user(kmem, usermem, size + 1));
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
unused = __copy_to_user(usermem, kmem, size + 1));
KUNIT_EXPECT_KASAN_FAIL(test,
unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
- KUNIT_EXPECT_KASAN_FAIL(test,
+ KUNIT_EXPECT_KASAN_FAIL_READ(test,
unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
/*
@@ -2104,6 +2212,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
KUNIT_CASE(kmem_cache_rcu_uaf),
+ KUNIT_CASE(kmem_cache_rcu_reuse),
KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kmem_cache_accounted),
KUNIT_CASE(kmem_cache_bulk),
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d2c70cd2afb1..5d2a876035d6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -125,7 +125,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
void *shadow_start, *shadow_end;
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return;
/*
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(kasan_poison);
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return;
if (size & KASAN_GRANULE_MASK) {
@@ -305,8 +305,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
pte_t pte;
int index;
- if (likely(!pte_none(ptep_get(ptep))))
- return 0;
+ arch_leave_lazy_mmu_mode();
index = PFN_DOWN(addr - data->start);
page = data->pages[index];
@@ -320,6 +319,8 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
}
spin_unlock(&init_mm.page_table_lock);
+ arch_enter_lazy_mmu_mode();
+
return 0;
}
@@ -335,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
}
}
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
{
unsigned long nr_populated, nr_total = nr_pages;
struct page **page_array = pages;
while (nr_pages) {
- nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+ nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
if (!nr_populated) {
___free_pages_bulk(page_array, nr_total - nr_pages);
return -ENOMEM;
@@ -353,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
return 0;
}
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
{
unsigned long nr_pages, nr_total = PFN_UP(end - start);
struct vmalloc_populate_data data;
+ unsigned int flags;
int ret = 0;
- data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
if (!data.pages)
return -ENOMEM;
while (nr_total) {
nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
- ret = ___alloc_pages_bulk(data.pages, nr_pages);
+ ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
if (ret)
break;
data.start = start;
+
+ /*
+ * Page table allocations ignore the external gfp mask; enforce it
+ * via the scope API.
+ */
+ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+ flags = memalloc_nofs_save();
+ else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+ flags = memalloc_noio_save();
+
ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
kasan_populate_vmalloc_pte, &data);
+
+ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+ memalloc_nofs_restore(flags);
+ else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+ memalloc_noio_restore(flags);
+
___free_pages_bulk(data.pages, nr_pages);
if (ret)
break;
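The memalloc scope API used above constrains nested allocations that take no gfp argument (such as the page-table allocations inside apply_to_page_range()). The general pattern, independent of this patch:

/* Pattern sketch: constrain allocations that take no gfp argument. */
static void nofs_scope_example(void)
{
	unsigned int flags;

	flags = memalloc_nofs_save();	/* task-wide: implicitly drop __GFP_FS */
	/* ...nested allocations in this window behave as GFP_NOFS... */
	memalloc_nofs_restore(flags);
}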
@@ -385,12 +403,12 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
return ret;
}
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
{
unsigned long shadow_start, shadow_end;
int ret;
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return 0;
if (!is_vmalloc_or_module_addr((void *)addr))
@@ -414,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
shadow_start = PAGE_ALIGN_DOWN(shadow_start);
shadow_end = PAGE_ALIGN(shadow_end);
- ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+ ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
if (ret)
return ret;
@@ -461,18 +479,23 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
void *unused)
{
- unsigned long page;
+ pte_t pte;
+ int none;
- page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);
+ arch_leave_lazy_mmu_mode();
spin_lock(&init_mm.page_table_lock);
-
- if (likely(!pte_none(ptep_get(ptep)))) {
+ pte = ptep_get(ptep);
+ none = pte_none(pte);
+ if (likely(!none))
pte_clear(&init_mm, addr, ptep);
- free_page(page);
- }
spin_unlock(&init_mm.page_table_lock);
+ if (likely(!none))
+ __free_page(pfn_to_page(pte_pfn(pte)));
+
+ arch_enter_lazy_mmu_mode();
+
return 0;
}
@@ -560,7 +583,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long region_start, region_end;
unsigned long size;
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return;
region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
@@ -611,7 +634,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
* with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
*/
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return (void *)start;
if (!is_vmalloc_or_module_addr(start))
@@ -636,7 +659,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
*/
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
- if (!kasan_arch_is_ready())
+ if (!kasan_enabled())
return;
if (!is_vmalloc_or_module_addr(start))
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index b9382b5b6a37..c75741a74602 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -44,6 +44,7 @@ void __init kasan_init_sw_tags(void)
per_cpu(prng_state, cpu) = (u32)get_cycles();
kasan_init_tags();
+ kasan_enable();
pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
str_on_off(kasan_stack_collection_enabled()));
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index d65d48b85f90..b9f31293622b 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -142,7 +142,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
save_stack_info(cache, object, flags, false);
}
-void kasan_save_free_info(struct kmem_cache *cache, void *object)
+void __kasan_save_free_info(struct kmem_cache *cache, void *object)
{
save_stack_info(cache, object, 0, true);
}