Diffstat (limited to 'mm/kasan/common.c')
 mm/kasan/common.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359feaf..1d27f1bd260b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -305,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-	if (!kasan_enabled())
-		return false;
-
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
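
For readability, a sketch of check_page_allocation() as it reads after this hunk, assembled only from the lines visible above (the descriptive comments are mine; the tail of the function is outside the hunk and is elided). The unconditional kasan_enabled() bail-out is gone, leaving the head-page check as the first thing the function does:

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	/* ptr must point at the start of the allocation, i.e. at the head page. */
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;	/* an invalid free was reported */
	}

	/* ... remainder of the function is outside this hunk ... */
}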
@@ -520,24 +517,20 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
-	struct folio *folio = virt_to_folio(ptr);
+	struct page *page = virt_to_page(ptr);
 	struct slab *slab;
 
-	/*
-	 * This function can be called for large kmalloc allocation that get
-	 * their memory from page_alloc. Thus, the folio might not be a slab.
-	 */
-	if (unlikely(!folio_test_slab(folio))) {
+	if (unlikely(PageLargeKmalloc(page))) {
 		if (check_page_allocation(ptr, ip))
 			return false;
-		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
 		return true;
 	}
 
 	if (is_kfence_address(ptr))
 		return true;
 
-	slab = folio_slab(folio);
+	slab = page_slab(page);
 
 	if (check_slab_allocation(slab->slab_cache, ptr, ip))
 		return false;
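
Putting the second hunk together, here is a sketch of __kasan_mempool_poison_object() as it reads after the change, built only from the lines visible in the hunk (the comments are mine, the first one paraphrasing the removed source comment; the tail of the function lies outside the hunk and is elided). The large-kmalloc case is now recognized by an explicit PageLargeKmalloc() test on the struct page instead of being inferred from "the folio is not a slab", and the folio accessors give way to their page counterparts (virt_to_page(), page_size(), page_slab()):

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct page *page = virt_to_page(ptr);
	struct slab *slab;

	/* Large kmalloc allocations get their memory directly from page_alloc. */
	if (unlikely(PageLargeKmalloc(page))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, page_size(page), KASAN_PAGE_FREE, false);
		return true;
	}

	/* KFENCE-managed objects are not poisoned by KASAN here. */
	if (is_kfence_address(ptr))
		return true;

	slab = page_slab(page);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;

	/* ... remainder of the function is outside this hunk ... */
}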