author    Ian Rogers <irogers@google.com>    2025-11-18 21:05:54 -0800
committer Namhyung Kim <namhyung@kernel.org>    2025-11-19 16:20:15 -0800
commit    245cfbcd3d3b298c75e01946262b3ff2cf7d0796 (patch)
tree      84ac43f58e16fa1c8f46cb366898211554bf5b88 /tools/perf/util
parent    51d87d977ec4efe000ac662ac924f00b9d8a23ec (diff)
perf maps: Avoid RC_CHK use after free
In __maps__fixup_overlap_and_insert, the case where the "new" map covers existing mappings can create a use-after-free when reference count checking is enabled. The problem is that "pos" holds a map pointer that is put when it is removed from maps_by_address but is then used to search for a map in maps_by_name, so the compared map is already freed. The issue stems from using maps__remove, which redoes searches that __maps__fixup_overlap_and_insert has already done. Optimize the code by avoiding the repeated searches, and avoid the use-after-free by inlining the appropriate removal code.

Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202511141407.f9edcfa6-lkp@intel.com
Signed-off-by: Ian Rogers <irogers@google.com>
Reviewed-by: James Clark <james.clark@linaro.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
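The following is a minimal standalone sketch, not the perf code itself, of the removal-by-known-index pattern the fix relies on: once the caller already knows the index of the entry to drop, the reference is put and the hole is closed with memmove(), so no later lookup has to compare against the freed pointer. All names here (toy_map, toy_maps, toy_maps__remove_at) are hypothetical.

/* Hypothetical toy model of an address-sorted, refcounted map array. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_map {
	unsigned long start, end;
	int refcnt;
};

struct toy_maps {
	struct toy_map **by_address;	/* sorted by start address */
	unsigned int nr_maps;
};

static void toy_map__put(struct toy_map *map)
{
	if (map && --map->refcnt == 0)
		free(map);
}

/*
 * Remove the entry at a known index: put the reference, then shift the
 * tail of the array down over it. Because the index was computed by the
 * caller, no further search touches the freed pointer.
 */
static void toy_maps__remove_at(struct toy_maps *maps, unsigned int i)
{
	toy_map__put(maps->by_address[i]);
	memmove(&maps->by_address[i], &maps->by_address[i + 1],
		(maps->nr_maps - i - 1) * sizeof(*maps->by_address));
	--maps->nr_maps;
}

int main(void)
{
	struct toy_map *a = calloc(1, sizeof(*a));
	struct toy_map *b = calloc(1, sizeof(*b));
	struct toy_maps maps = {
		.by_address = calloc(2, sizeof(struct toy_map *)),
		.nr_maps = 2,
	};

	a->start = 0x1000; a->end = 0x2000; a->refcnt = 1;
	b->start = 0x2000; b->end = 0x3000; b->refcnt = 1;
	maps.by_address[0] = a;
	maps.by_address[1] = b;

	toy_maps__remove_at(&maps, 0);	/* drop "a" without re-searching */
	printf("%u map(s) left, first starts at %#lx\n",
	       maps.nr_maps, maps.by_address[0]->start);

	toy_map__put(maps.by_address[0]);
	free(maps.by_address);
	return 0;
}

In the real code the same idea is applied twice, to maps_by_address and to the optional maps_by_name array, using the indices i and ni that the surrounding function has already computed.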
Diffstat (limited to 'tools/perf/util')
-rw-r--r--    tools/perf/util/maps.c    22
1 file changed, 20 insertions, 2 deletions
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 779f6230130a..c321d4f4d846 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -931,8 +931,9 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
return err;
} else {
struct map *next = NULL;
+ unsigned int nr_maps = maps__nr_maps(maps);
- if (i + 1 < maps__nr_maps(maps))
+ if (i + 1 < nr_maps)
next = maps_by_address[i + 1];
if (!next || map__start(next) >= map__end(new)) {
@@ -953,7 +954,24 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
check_invariants(maps);
return err;
}
- __maps__remove(maps, pos);
+ /*
+ * pos fully covers the previous mapping so remove
+ * it. The following is an inlined version of
+ * maps__remove that reuses the already computed
+ * indices.
+ */
+ map__put(maps_by_address[i]);
+ memmove(&maps_by_address[i],
+ &maps_by_address[i + 1],
+ (nr_maps - i - 1) * sizeof(*maps_by_address));
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ memmove(&maps_by_name[ni],
+ &maps_by_name[ni + 1],
+ (nr_maps - ni - 1) * sizeof(*maps_by_name));
+ }
+ --RC_CHK_ACCESS(maps)->nr_maps;
check_invariants(maps);
/*
* Maps are ordered but no need to increase `i` as the