1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2025 - Google Inc
* Author: Mostafa Saleh <smostafa@google.com>
* IOMMU API debug page alloc sanitizer
*/
#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/iommu-debug-pagealloc.h>
#include <linux/kernel.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>
#include "iommu-priv.h"
/* Set from the "iommu.debug_pagealloc" early param; sampled during boot. */
static bool needed;
/* Flipped once in iommu_debug_init(); gates all debug hooks at runtime. */
DEFINE_STATIC_KEY_FALSE(iommu_debug_initialized);
/* Per-page metadata kept in the page_ext area of the vmemmap. */
struct iommu_debug_metadata {
	atomic_t ref;	/* number of live IOMMU mappings covering this page */
};
/*
 * Tell the page_ext core whether to reserve per-page space for us;
 * true only when the "iommu.debug_pagealloc" boot option was set.
 */
static __init bool need_iommu_debug(void)
{
	return needed;
}
/* Registration of our per-page metadata slot with the page_ext subsystem. */
struct page_ext_operations page_iommu_debug_ops = {
	.need = need_iommu_debug,
	.size = sizeof(struct iommu_debug_metadata),
};
/* Locate our metadata inside a page's page_ext record. */
static struct iommu_debug_metadata *get_iommu_data(struct page_ext *ext)
{
	return page_ext_data(ext, &page_iommu_debug_ops);
}
/* Bump the mapping refcount of the page backing @phys, if it is tracked. */
static void iommu_debug_inc_page(phys_addr_t phys)
{
	struct iommu_debug_metadata *meta;
	struct page_ext *ext = page_ext_from_phys(phys);

	if (!ext)
		return;

	meta = get_iommu_data(ext);
	/* A non-positive result here means the counter over/underflowed. */
	WARN_ON(atomic_inc_return_relaxed(&meta->ref) <= 0);
	page_ext_put(ext);
}
/* Drop the mapping refcount of the page backing @phys, if it is tracked. */
static void iommu_debug_dec_page(phys_addr_t phys)
{
	struct iommu_debug_metadata *meta;
	struct page_ext *ext = page_ext_from_phys(phys);

	if (!ext)
		return;

	meta = get_iommu_data(ext);
	/* Going negative means an unmap of something never mapped. */
	WARN_ON(atomic_dec_return_relaxed(&meta->ref) < 0);
	page_ext_put(ext);
}
/*
 * IOMMU page size doesn't have to match the CPU page size. So, we use
 * the smallest IOMMU page size to refcount the pages in the vmemmap.
 * That is important as both map and unmap has to use the same page size
 * to update the refcount to avoid double counting the same page.
 * And as we can't know from iommu_unmap() what was the original page size
 * used for map, we just use the minimum supported one for both.
 */
static size_t iommu_debug_page_size(struct iommu_domain *domain)
{
	unsigned long min_order = __ffs(domain->pgsize_bitmap);

	return 1UL << min_order;
}
static bool iommu_debug_page_count(const struct page *page)
{
unsigned int ref;
struct page_ext *page_ext = page_ext_get(page);
struct iommu_debug_metadata *d = get_iommu_data(page_ext);
ref = atomic_read(&d->ref);
page_ext_put(page_ext);
return ref != 0;
}
/* Warn (with page_owner info) for any page in the range still IOMMU-mapped. */
void __iommu_debug_check_unmapped(const struct page *page, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++, page++) {
		if (!WARN_ON(iommu_debug_page_count(page)))
			continue;
		pr_warn("iommu: Detected page leak!\n");
		dump_page_owner(page);
	}
}
/* Account a new mapping: take a ref on every minimum-sized page in range. */
void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
{
	size_t step = iommu_debug_page_size(domain);
	size_t off = 0;
	size_t end;

	if (WARN_ON(!phys || check_add_overflow(phys, size, &end)))
		return;

	while (off < size) {
		iommu_debug_inc_page(phys + off);
		off += step;
	}
}
/*
 * Walk an IOVA range in minimum-page-size steps, translating each step to
 * a physical address and adjusting its refcount up (@inc) or down.
 */
static void __iommu_debug_update_iova(struct iommu_domain *domain,
				      unsigned long iova, size_t size, bool inc)
{
	size_t step = iommu_debug_page_size(domain);
	size_t end, off;

	if (WARN_ON(check_add_overflow(iova, size, &end)))
		return;

	for (off = 0; off < size; off += step) {
		phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);

		/* Holes in the mapping are simply skipped. */
		if (phys) {
			if (inc)
				iommu_debug_inc_page(phys);
			else
				iommu_debug_dec_page(phys);
		}
	}
}
/*
 * Called before iommu_unmap(): drop refs for the whole range while the
 * IOVA→phys translations are still valid (they are gone afterwards).
 */
void __iommu_debug_unmap_begin(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
__iommu_debug_update_iova(domain, iova, size, false);
}
/*
 * Called after iommu_unmap() with the actual number of bytes unmapped;
 * re-takes the refs dropped in unmap_begin for any still-mapped tail.
 */
void __iommu_debug_unmap_end(struct iommu_domain *domain,
			     unsigned long iova, size_t size,
			     size_t unmapped)
{
	size_t remaining;

	if (WARN_ON_ONCE(unmapped > size))
		return;

	remaining = size - unmapped;
	if (!remaining)
		return;

	/* If unmap failed, re-increment the refcount. */
	__iommu_debug_update_iova(domain, iova + unmapped, remaining, true);
}
/*
 * Enable the debug hooks by flipping the static key. Called once during
 * IOMMU core init; a no-op unless "iommu.debug_pagealloc" was set.
 */
void iommu_debug_init(void)
{
	if (!needed)
		return;

	/* printk messages must be newline-terminated (was missing "\n"). */
	pr_info("iommu: Debugging page allocations, expect overhead or disable iommu.debug_pagealloc\n");
	static_branch_enable(&iommu_debug_initialized);
}
/* Boot-time parser for the boolean "iommu.debug_pagealloc" option. */
static int __init iommu_debug_pagealloc(char *str)
{
	int ret = kstrtobool(str, &needed);

	return ret;
}
early_param("iommu.debug_pagealloc", iommu_debug_pagealloc);
|