From 6aacab308a5dfd222b2d23662bbae60c11007cfb Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 22 Jan 2026 16:06:20 +0000 Subject: tools/testing/vma: separate VMA userland tests into separate files So far the userland VMA tests have been established as a rough expression of what's been possible. Adapt it into a more usable form by separating out tests and shared helper functions. Since we test functions that are declared statically in mm/vma.c, we make use of the trick of #include'ing kernel C files directly. In order for the tests to continue to function, we must therefore also #include the test files in this way, placing them into the tests/ directory. We try to keep as much shared logic actually modularised into a separate compilation unit in shared.c, however the merge_existing() and attach_vma() helpers rely on statically declared mm/vma.c functions so these must be declared in main.c. Link: https://lkml.kernel.org/r/a0455ccfe4fdcd1c962c64f76304f612e5662a4e.1769097829.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Baolin Wang Cc: Barry Song Cc: David Hildenbrand Cc: Dev Jain Cc: Jason Gunthorpe Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zi Yan Cc: Damien Le Moal Cc: "Darrick J. 
Wong" Cc: Jarkko Sakkinen Cc: Yury Norov Cc: Chris Mason Cc: Pedro Falcato Signed-off-by: Andrew Morton --- tools/testing/vma/Makefile | 4 +- tools/testing/vma/main.c | 55 ++ tools/testing/vma/shared.c | 131 +++ tools/testing/vma/shared.h | 114 +++ tools/testing/vma/tests/merge.c | 1469 +++++++++++++++++++++++++++++++ tools/testing/vma/tests/mmap.c | 57 ++ tools/testing/vma/tests/vma.c | 39 + tools/testing/vma/vma.c | 1785 -------------------------------------- tools/testing/vma/vma_internal.h | 9 - 9 files changed, 1867 insertions(+), 1796 deletions(-) create mode 100644 tools/testing/vma/main.c create mode 100644 tools/testing/vma/shared.c create mode 100644 tools/testing/vma/shared.h create mode 100644 tools/testing/vma/tests/merge.c create mode 100644 tools/testing/vma/tests/mmap.c create mode 100644 tools/testing/vma/tests/vma.c delete mode 100644 tools/testing/vma/vma.c (limited to 'tools/testing') diff --git a/tools/testing/vma/Makefile b/tools/testing/vma/Makefile index 66f3831a668f..94133d9d3955 100644 --- a/tools/testing/vma/Makefile +++ b/tools/testing/vma/Makefile @@ -6,10 +6,10 @@ default: vma include ../shared/shared.mk -OFILES = $(SHARED_OFILES) vma.o maple-shim.o +OFILES = $(SHARED_OFILES) main.o shared.o maple-shim.o TARGETS = vma -vma.o: vma.c vma_internal.h ../../../mm/vma.c ../../../mm/vma_init.c ../../../mm/vma_exec.c ../../../mm/vma.h +main.o: main.c shared.c shared.h vma_internal.h tests/merge.c tests/mmap.c tests/vma.c ../../../mm/vma.c ../../../mm/vma_init.c ../../../mm/vma_exec.c ../../../mm/vma.h vma: $(OFILES) $(CC) $(CFLAGS) -o $@ $(OFILES) $(LDLIBS) diff --git a/tools/testing/vma/main.c b/tools/testing/vma/main.c new file mode 100644 index 000000000000..49b09e97a51f --- /dev/null +++ b/tools/testing/vma/main.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "shared.h" +/* + * Directly import the VMA implementation here. 
Our vma_internal.h wrapper + * provides userland-equivalent functionality for everything vma.c uses. + */ +#include "../../../mm/vma_init.c" +#include "../../../mm/vma_exec.c" +#include "../../../mm/vma.c" + +/* Tests are included directly so they can test static functions in mm/vma.c. */ +#include "tests/merge.c" +#include "tests/mmap.c" +#include "tests/vma.c" + +/* Helper functions which utilise static kernel functions. */ + +struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) +{ + struct vm_area_struct *vma; + + vma = vma_merge_existing_range(vmg); + if (vma) + vma_assert_attached(vma); + return vma; +} + +int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) +{ + int res; + + res = vma_link(mm, vma); + if (!res) + vma_assert_attached(vma); + return res; +} + +/* Main test running which invokes tests/ *.c runners. */ +int main(void) +{ + int num_tests = 0, num_fail = 0; + + maple_tree_init(); + vma_state_init(); + + run_merge_tests(&num_tests, &num_fail); + run_mmap_tests(&num_tests, &num_fail); + run_vma_tests(&num_tests, &num_fail); + + printf("%d tests run, %d passed, %d failed.\n", + num_tests, num_tests - num_fail, num_fail); + + return num_fail == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/tools/testing/vma/shared.c b/tools/testing/vma/shared.c new file mode 100644 index 000000000000..bda578cc3304 --- /dev/null +++ b/tools/testing/vma/shared.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "shared.h" + + +bool fail_prealloc; +unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; +unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; +unsigned long stack_guard_gap = 256UL<vm_start = start; + vma->vm_end = end; + vma->vm_pgoff = pgoff; + vm_flags_reset(vma, vm_flags); + vma_assert_detached(vma); + + return vma; +} + +void detach_free_vma(struct vm_area_struct *vma) +{ + vma_mark_detached(vma); + vm_area_free(vma); +} + +struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, + unsigned long start, unsigned long end, + pgoff_t pgoff, vm_flags_t vm_flags) +{ + struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags); + + if (vma == NULL) + return NULL; + + if (attach_vma(mm, vma)) { + detach_free_vma(vma); + return NULL; + } + + /* + * Reset this counter which we use to track whether writes have + * begun. Linking to the tree will have caused this to be incremented, + * which means we will get a false positive otherwise. + */ + vma->vm_lock_seq = UINT_MAX; + + return vma; +} + +void reset_dummy_anon_vma(void) +{ + dummy_anon_vma.was_cloned = false; + dummy_anon_vma.was_unlinked = false; +} + +int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi) +{ + struct vm_area_struct *vma; + int count = 0; + + fail_prealloc = false; + reset_dummy_anon_vma(); + + vma_iter_set(vmi, 0); + for_each_vma(*vmi, vma) { + detach_free_vma(vma); + count++; + } + + mtree_destroy(&mm->mm_mt); + mm->map_count = 0; + return count; +} + +bool vma_write_started(struct vm_area_struct *vma) +{ + int seq = vma->vm_lock_seq; + + /* We reset after each check. */ + vma->vm_lock_seq = UINT_MAX; + + /* The vma_start_write() stub simply increments this value. 
*/ + return seq > -1; +} + +void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, + struct anon_vma_chain *avc, struct anon_vma *anon_vma) +{ + vma->anon_vma = anon_vma; + INIT_LIST_HEAD(&vma->anon_vma_chain); + list_add(&avc->same_vma, &vma->anon_vma_chain); + avc->anon_vma = vma->anon_vma; +} + +void vma_set_dummy_anon_vma(struct vm_area_struct *vma, + struct anon_vma_chain *avc) +{ + __vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma); +} + +struct task_struct *get_current(void) +{ + return &__current; +} + +unsigned long rlimit(unsigned int limit) +{ + return (unsigned long)-1; +} + +void vma_set_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + pgoff_t pgoff) +{ + vma->vm_start = start; + vma->vm_end = end; + vma->vm_pgoff = pgoff; +} diff --git a/tools/testing/vma/shared.h b/tools/testing/vma/shared.h new file mode 100644 index 000000000000..6c64211cfa22 --- /dev/null +++ b/tools/testing/vma/shared.h @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include +#include +#include + +#include "generated/bit-length.h" +#include "maple-shared.h" +#include "vma_internal.h" +#include "../../../mm/vma.h" + +/* Simple test runner. Assumes local num_[fail, tests] counters. */ +#define TEST(name) \ + do { \ + (*num_tests)++; \ + if (!test_##name()) { \ + (*num_fail)++; \ + fprintf(stderr, "Test " #name " FAILED\n"); \ + } \ + } while (0) + +#define ASSERT_TRUE(_expr) \ + do { \ + if (!(_expr)) { \ + fprintf(stderr, \ + "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \ + __FILE__, __LINE__, __FUNCTION__, #_expr); \ + return false; \ + } \ + } while (0) + +#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr)) +#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2)) +#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2)) + +#define IS_SET(_val, _flags) ((_val & _flags) == _flags) + +extern bool fail_prealloc; + +/* Override vma_iter_prealloc() so we can choose to fail it. 
*/ +#define vma_iter_prealloc(vmi, vma) \ + (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL)) + +#define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536 + +extern unsigned long mmap_min_addr; +extern unsigned long dac_mmap_min_addr; +extern unsigned long stack_guard_gap; + +extern const struct vm_operations_struct vma_dummy_vm_ops; +extern struct anon_vma dummy_anon_vma; +extern struct task_struct __current; + +/* + * Helper function which provides a wrapper around a merge existing VMA + * operation. + * + * Declared in main.c as uses static VMA function. + */ +struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg); + +/* + * Helper function to allocate a VMA and link it to the tree. + * + * Declared in main.c as uses static VMA function. + */ +int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma); + +/* Helper function providing a dummy vm_ops->close() method.*/ +static inline void dummy_close(struct vm_area_struct *) +{ +} + +/* Helper function to simply allocate a VMA. */ +struct vm_area_struct *alloc_vma(struct mm_struct *mm, + unsigned long start, unsigned long end, + pgoff_t pgoff, vm_flags_t vm_flags); + +/* Helper function to detach and free a VMA. */ +void detach_free_vma(struct vm_area_struct *vma); + +/* Helper function to allocate a VMA and link it to the tree. */ +struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, + unsigned long start, unsigned long end, + pgoff_t pgoff, vm_flags_t vm_flags); + +/* + * Helper function to reset the dummy anon_vma to indicate it has not been + * duplicated. + */ +void reset_dummy_anon_vma(void); + +/* + * Helper function to remove all VMAs and destroy the maple tree associated with + * a virtual address space. Returns a count of VMAs in the tree. + */ +int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi); + +/* Helper function to determine if VMA has had vma_start_write() performed. 
*/ +bool vma_write_started(struct vm_area_struct *vma); + +void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, + struct anon_vma_chain *avc, struct anon_vma *anon_vma); + +/* Provide a simple dummy VMA/anon_vma dummy setup for testing. */ +void vma_set_dummy_anon_vma(struct vm_area_struct *vma, + struct anon_vma_chain *avc); + +/* Helper function to specify a VMA's range. */ +void vma_set_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + pgoff_t pgoff); diff --git a/tools/testing/vma/tests/merge.c b/tools/testing/vma/tests/merge.c new file mode 100644 index 000000000000..3708dc6945b0 --- /dev/null +++ b/tools/testing/vma/tests/merge.c @@ -0,0 +1,1469 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* Helper function which provides a wrapper around a merge new VMA operation. */ +static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) +{ + struct vm_area_struct *vma; + /* + * For convenience, get prev and next VMAs. Which the new VMA operation + * requires. + */ + vmg->next = vma_next(vmg->vmi); + vmg->prev = vma_prev(vmg->vmi); + vma_iter_next_range(vmg->vmi); + + vma = vma_merge_new_range(vmg); + if (vma) + vma_assert_attached(vma); + + return vma; +} + +/* + * Helper function which provides a wrapper around the expansion of an existing + * VMA. + */ +static int expand_existing(struct vma_merge_struct *vmg) +{ + return vma_expand(vmg); +} + +/* + * Helper function to reset merge state the associated VMA iterator to a + * specified new range. 
+ */ +void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags) +{ + vma_iter_set(vmg->vmi, start); + + vmg->prev = NULL; + vmg->middle = NULL; + vmg->next = NULL; + vmg->target = NULL; + + vmg->start = start; + vmg->end = end; + vmg->pgoff = pgoff; + vmg->vm_flags = vm_flags; + + vmg->just_expand = false; + vmg->__remove_middle = false; + vmg->__remove_next = false; + vmg->__adjust_middle_start = false; + vmg->__adjust_next_start = false; +} + +/* Helper function to set both the VMG range and its anon_vma. */ +static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start, + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, + struct anon_vma *anon_vma) +{ + vmg_set_range(vmg, start, end, pgoff, vm_flags); + vmg->anon_vma = anon_vma; +} + +/* + * Helper function to try to merge a new VMA. + * + * Update vmg and the iterator for it and try to merge, otherwise allocate a new + * VMA, link it to the maple tree and return it. 
+ */ +static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm, + struct vma_merge_struct *vmg, unsigned long start, + unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, + bool *was_merged) +{ + struct vm_area_struct *merged; + + vmg_set_range(vmg, start, end, pgoff, vm_flags); + + merged = merge_new(vmg); + if (merged) { + *was_merged = true; + ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS); + return merged; + } + + *was_merged = false; + + ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE); + + return alloc_and_link_vma(mm, start, end, pgoff, vm_flags); +} + +static bool test_simple_merge(void) +{ + struct vm_area_struct *vma; + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags); + struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags); + VMA_ITERATOR(vmi, &mm, 0x1000); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + .start = 0x1000, + .end = 0x2000, + .vm_flags = vm_flags, + .pgoff = 1, + }; + + ASSERT_FALSE(attach_vma(&mm, vma_left)); + ASSERT_FALSE(attach_vma(&mm, vma_right)); + + vma = merge_new(&vmg); + ASSERT_NE(vma, NULL); + + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x3000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->vm_flags, vm_flags); + + detach_free_vma(vma); + mtree_destroy(&mm.mm_mt); + + return true; +} + +static bool test_simple_modify(void) +{ + struct vm_area_struct *vma; + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags); + VMA_ITERATOR(vmi, &mm, 0x1000); + vm_flags_t flags = VM_READ | VM_MAYREAD; + + ASSERT_FALSE(attach_vma(&mm, init_vma)); + + /* + * The flags will not be changed, the vma_modify_flags() function + * performs the merge/split only. 
+ */ + vma = vma_modify_flags(&vmi, init_vma, init_vma, + 0x1000, 0x2000, &flags); + ASSERT_NE(vma, NULL); + /* We modify the provided VMA, and on split allocate new VMAs. */ + ASSERT_EQ(vma, init_vma); + + ASSERT_EQ(vma->vm_start, 0x1000); + ASSERT_EQ(vma->vm_end, 0x2000); + ASSERT_EQ(vma->vm_pgoff, 1); + + /* + * Now walk through the three split VMAs and make sure they are as + * expected. + */ + + vma_iter_set(&vmi, 0); + vma = vma_iter_load(&vmi); + + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x1000); + ASSERT_EQ(vma->vm_pgoff, 0); + + detach_free_vma(vma); + vma_iter_clear(&vmi); + + vma = vma_next(&vmi); + + ASSERT_EQ(vma->vm_start, 0x1000); + ASSERT_EQ(vma->vm_end, 0x2000); + ASSERT_EQ(vma->vm_pgoff, 1); + + detach_free_vma(vma); + vma_iter_clear(&vmi); + + vma = vma_next(&vmi); + + ASSERT_EQ(vma->vm_start, 0x2000); + ASSERT_EQ(vma->vm_end, 0x3000); + ASSERT_EQ(vma->vm_pgoff, 2); + + detach_free_vma(vma); + mtree_destroy(&mm.mm_mt); + + return true; +} + +static bool test_simple_expand(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags); + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .vmi = &vmi, + .target = vma, + .start = 0, + .end = 0x3000, + .pgoff = 0, + }; + + ASSERT_FALSE(attach_vma(&mm, vma)); + + ASSERT_FALSE(expand_existing(&vmg)); + + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x3000); + ASSERT_EQ(vma->vm_pgoff, 0); + + detach_free_vma(vma); + mtree_destroy(&mm.mm_mt); + + return true; +} + +static bool test_simple_shrink(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags); + VMA_ITERATOR(vmi, &mm, 0); + + ASSERT_FALSE(attach_vma(&mm, vma)); + + ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0)); + + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 
0x1000); + ASSERT_EQ(vma->vm_pgoff, 0); + + detach_free_vma(vma); + mtree_destroy(&mm.mm_mt); + + return true; +} + +static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + struct anon_vma_chain dummy_anon_vma_chain_a = { + .anon_vma = &dummy_anon_vma, + }; + struct anon_vma_chain dummy_anon_vma_chain_b = { + .anon_vma = &dummy_anon_vma, + }; + struct anon_vma_chain dummy_anon_vma_chain_c = { + .anon_vma = &dummy_anon_vma, + }; + struct anon_vma_chain dummy_anon_vma_chain_d = { + .anon_vma = &dummy_anon_vma, + }; + const struct vm_operations_struct vm_ops = { + .close = dummy_close, + }; + int count; + struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d; + bool merged; + + if (is_sticky) + vm_flags |= VM_STICKY; + + /* + * 0123456789abc + * AA B CC + */ + vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); + ASSERT_NE(vma_a, NULL); + if (a_is_sticky) + vm_flags_set(vma_a, VM_STICKY); + /* We give each VMA a single avc so we can test anon_vma duplication. */ + INIT_LIST_HEAD(&vma_a->anon_vma_chain); + list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain); + + vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); + ASSERT_NE(vma_b, NULL); + if (b_is_sticky) + vm_flags_set(vma_b, VM_STICKY); + INIT_LIST_HEAD(&vma_b->anon_vma_chain); + list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain); + + vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags); + ASSERT_NE(vma_c, NULL); + if (c_is_sticky) + vm_flags_set(vma_c, VM_STICKY); + INIT_LIST_HEAD(&vma_c->anon_vma_chain); + list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain); + + /* + * NO merge. 
+ * + * 0123456789abc + * AA B ** CC + */ + vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged); + ASSERT_NE(vma_d, NULL); + INIT_LIST_HEAD(&vma_d->anon_vma_chain); + list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain); + ASSERT_FALSE(merged); + ASSERT_EQ(mm.map_count, 4); + + /* + * Merge BOTH sides. + * + * 0123456789abc + * AA*B DD CC + */ + vma_a->vm_ops = &vm_ops; /* This should have no impact. */ + vma_b->anon_vma = &dummy_anon_vma; + vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged); + ASSERT_EQ(vma, vma_a); + /* Merge with A, delete B. */ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x4000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 3); + if (is_sticky || a_is_sticky || b_is_sticky) + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Merge to PREVIOUS VMA. + * + * 0123456789abc + * AAAA* DD CC + */ + vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged); + ASSERT_EQ(vma, vma_a); + /* Extend A. */ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x5000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 3); + if (is_sticky || a_is_sticky) + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Merge to NEXT VMA. + * + * 0123456789abc + * AAAAA *DD CC + */ + vma_d->anon_vma = &dummy_anon_vma; + vma_d->vm_ops = &vm_ops; /* This should have no impact. */ + vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged); + ASSERT_EQ(vma, vma_d); + /* Prepend. 
*/ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0x6000); + ASSERT_EQ(vma->vm_end, 0x9000); + ASSERT_EQ(vma->vm_pgoff, 6); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 3); + if (is_sticky) /* D uses is_sticky. */ + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Merge BOTH sides. + * + * 0123456789abc + * AAAAA*DDD CC + */ + vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */ + vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged); + ASSERT_EQ(vma, vma_a); + /* Merge with A, delete D. */ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x9000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 2); + if (is_sticky || a_is_sticky) + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Merge to NEXT VMA. + * + * 0123456789abc + * AAAAAAAAA *CC + */ + vma_c->anon_vma = &dummy_anon_vma; + vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged); + ASSERT_EQ(vma, vma_c); + /* Prepend C. */ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0xa000); + ASSERT_EQ(vma->vm_end, 0xc000); + ASSERT_EQ(vma->vm_pgoff, 0xa); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 2); + if (is_sticky || c_is_sticky) + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Merge BOTH sides. + * + * 0123456789abc + * AAAAAAAAA*CCC + */ + vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged); + ASSERT_EQ(vma, vma_a); + /* Extend A and delete C. 
*/ + ASSERT_TRUE(merged); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0xc000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 1); + if (is_sticky || a_is_sticky || c_is_sticky) + ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); + + /* + * Final state. + * + * 0123456789abc + * AAAAAAAAAAAAA + */ + + count = 0; + vma_iter_set(&vmi, 0); + for_each_vma(vmi, vma) { + ASSERT_NE(vma, NULL); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0xc000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); + + detach_free_vma(vma); + count++; + } + + /* Should only have one VMA left (though freed) after all is done.*/ + ASSERT_EQ(count, 1); + + mtree_destroy(&mm.mm_mt); + return true; +} + +static bool test_merge_new(void) +{ + int i, j, k, l; + + /* Generate every possible permutation of sticky flags. */ + for (i = 0; i < 2; i++) + for (j = 0; j < 2; j++) + for (k = 0; k < 2; k++) + for (l = 0; l < 2; l++) + ASSERT_TRUE(__test_merge_new(i, j, k, l)); + + return true; +} + +static bool test_vma_merge_special_flags(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP }; + vm_flags_t all_special_flags = 0; + int i; + struct vm_area_struct *vma_left, *vma; + + /* Make sure there aren't new VM_SPECIAL flags. */ + for (i = 0; i < ARRAY_SIZE(special_flags); i++) { + all_special_flags |= special_flags[i]; + } + ASSERT_EQ(all_special_flags, VM_SPECIAL); + + /* + * 01234 + * AAA + */ + vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + ASSERT_NE(vma_left, NULL); + + /* 1. Set up new VMA with special flag that would otherwise merge. */ + + /* + * 01234 + * AAA* + * + * This should merge if not for the VM_SPECIAL flag. 
+ */ + vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags); + for (i = 0; i < ARRAY_SIZE(special_flags); i++) { + vm_flags_t special_flag = special_flags[i]; + + vm_flags_reset(vma_left, vm_flags | special_flag); + vmg.vm_flags = vm_flags | special_flag; + vma = merge_new(&vmg); + ASSERT_EQ(vma, NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + } + + /* 2. Modify VMA with special flag that would otherwise merge. */ + + /* + * 01234 + * AAAB + * + * Create a VMA to modify. + */ + vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); + ASSERT_NE(vma, NULL); + vmg.middle = vma; + + for (i = 0; i < ARRAY_SIZE(special_flags); i++) { + vm_flags_t special_flag = special_flags[i]; + + vm_flags_reset(vma_left, vm_flags | special_flag); + vmg.vm_flags = vm_flags | special_flag; + vma = merge_existing(&vmg); + ASSERT_EQ(vma, NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + } + + cleanup_mm(&mm, &vmi); + return true; +} + +static bool test_vma_merge_with_close(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + const struct vm_operations_struct vm_ops = { + .close = dummy_close, + }; + struct vm_area_struct *vma_prev, *vma_next, *vma; + + /* + * When merging VMAs we are not permitted to remove any VMA that has a + * vm_ops->close() hook. + * + * Considering the two possible adjacent VMAs to which a VMA can be + * merged: + * + * [ prev ][ vma ][ next ] + * + * In no case will we need to delete prev. If the operation is + * mergeable, then prev will be extended with one or both of vma and + * next deleted. + * + * As a result, during initial mergeability checks, only + * can_vma_merge_before() (which implies the VMA being merged with is + * 'next' as shown above) bothers to check to see whether the next VMA + * has a vm_ops->close() callback that will need to be called when + * removed. 
+ * + * If it does, then we cannot merge as the resources that the close() + * operation potentially clears down are tied only to the existing VMA + * range and we have no way of extending those to the nearly merged one. + * + * We must consider two scenarios: + * + * A. + * + * vm_ops->close: - - !NULL + * [ prev ][ vma ][ next ] + * + * Where prev may or may not be present/mergeable. + * + * This is picked up by a specific check in can_vma_merge_before(). + * + * B. + * + * vm_ops->close: - !NULL + * [ prev ][ vma ] + * + * Where prev and vma are present and mergeable. + * + * This is picked up by a specific check in the modified VMA merge. + * + * IMPORTANT NOTE: We make the assumption that the following case: + * + * - !NULL NULL + * [ prev ][ vma ][ next ] + * + * Cannot occur, because vma->vm_ops being the same implies the same + * vma->vm_file, and therefore this would mean that next->vm_ops->close + * would be set too, and thus scenario A would pick this up. + */ + + /* + * The only case of a new VMA merge that results in a VMA being deleted + * is one where both the previous and next VMAs are merged - in this + * instance the next VMA is deleted, and the previous VMA is extended. + * + * If we are unable to do so, we reduce the operation to simply + * extending the prev VMA and not merging next. + * + * 0123456789 + * PPP**NNNN + * -> + * 0123456789 + * PPPPPPNNN + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); + vma_next->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + ASSERT_EQ(merge_new(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x5000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * When modifying an existing VMA there are further cases where we + * delete VMAs. 
+ * + * <> + * 0123456789 + * PPPVV + * + * In this instance, if vma has a close hook, the merge simply cannot + * proceed. + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + /* + * The VMA being modified in a way that would otherwise merge should + * also fail. + */ + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * This case is mirrored if merging with next. + * + * <> + * 0123456789 + * VVNNNN + * + * In this instance, if vma has a close hook, the merge simply cannot + * proceed. + */ + + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); + vma->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + /* + * Initially this is misapprehended as an out of memory report, as the + * close() check is handled in the same way as anon_vma duplication + * failures, however a subsequent patch resolves this. + */ + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * Finally, we consider two variants of the case where we modify a VMA + * to merge with both the previous and next VMAs. + * + * The first variant is where vma has a close hook. In this instance, no + * merge can proceed. 
+ * + * <> + * 0123456789 + * PPPVVNNNN + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); + vma->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 3); + + /* + * The second variant is where next has a close hook. In this instance, + * we reduce the operation to a merge between prev and vma. + * + * <> + * 0123456789 + * PPPVVNNNN + * -> + * 0123456789 + * PPPPPNNNN + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); + vma_next->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x5000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + return true; +} + +static bool test_vma_merge_new_with_close(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); + struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags); + const struct vm_operations_struct vm_ops = { + .close = dummy_close, + }; + struct vm_area_struct *vma; + + /* + * We should allow the partial merge of a proposed new VMA if the + * surrounding VMAs have vm_ops->close() hooks (but are otherwise + * compatible), e.g.: + * + * New 
VMA + * A v-------v B + * |-----| |-----| + * close close + * + * Since the rule is to not DELETE a VMA with a close operation, this + * should be permitted, only rather than expanding A and deleting B, we + * should simply expand A and leave B intact, e.g.: + * + * New VMA + * A B + * |------------||-----| + * close close + */ + + /* Have prev and next have a vm_ops->close() hook. */ + vma_prev->vm_ops = &vm_ops; + vma_next->vm_ops = &vm_ops; + + vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags); + vma = merge_new(&vmg); + ASSERT_NE(vma, NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x5000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_EQ(vma->vm_ops, &vm_ops); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 2); + + cleanup_mm(&mm, &vmi); + return true; +} + +static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + vm_flags_t prev_flags = vm_flags; + vm_flags_t next_flags = vm_flags; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vm_area_struct *vma, *vma_prev, *vma_next; + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + const struct vm_operations_struct vm_ops = { + .close = dummy_close, + }; + struct anon_vma_chain avc = {}; + + if (prev_is_sticky) + prev_flags |= VM_STICKY; + if (middle_is_sticky) + vm_flags |= VM_STICKY; + if (next_is_sticky) + next_flags |= VM_STICKY; + + /* + * Merge right case - partial span. + * + * <-> + * 0123456789 + * VVVVNNN + * -> + * 0123456789 + * VNNNNNN + */ + vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags); + vma->vm_ops = &vm_ops; /* This should have no impact. */ + vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags); + vma_next->vm_ops = &vm_ops; /* This should have no impact. 
*/ + vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma); + vmg.middle = vma; + vmg.prev = vma; + vma_set_dummy_anon_vma(vma, &avc); + ASSERT_EQ(merge_existing(&vmg), vma_next); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_next->vm_start, 0x3000); + ASSERT_EQ(vma_next->vm_end, 0x9000); + ASSERT_EQ(vma_next->vm_pgoff, 3); + ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); + ASSERT_EQ(vma->vm_start, 0x2000); + ASSERT_EQ(vma->vm_end, 0x3000); + ASSERT_EQ(vma->vm_pgoff, 2); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_TRUE(vma_write_started(vma_next)); + ASSERT_EQ(mm.map_count, 2); + if (middle_is_sticky || next_is_sticky) + ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY)); + + /* Clear down and reset. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * Merge right case - full span. + * + * <--> + * 0123456789 + * VVVVNNN + * -> + * 0123456789 + * NNNNNNN + */ + vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags); + vma_next->vm_ops = &vm_ops; /* This should have no impact. */ + vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma); + vmg.middle = vma; + vma_set_dummy_anon_vma(vma, &avc); + ASSERT_EQ(merge_existing(&vmg), vma_next); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_next->vm_start, 0x2000); + ASSERT_EQ(vma_next->vm_end, 0x9000); + ASSERT_EQ(vma_next->vm_pgoff, 2); + ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma_next)); + ASSERT_EQ(mm.map_count, 1); + if (middle_is_sticky || next_is_sticky) + ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY)); + + /* Clear down and reset. We should have deleted vma. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); + + /* + * Merge left case - partial span. 
+ * + * <-> + * 0123456789 + * PPPVVVV + * -> + * 0123456789 + * PPPPPPV + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); + vma_prev->vm_ops = &vm_ops; /* This should have no impact. */ + vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); + vma->vm_ops = &vm_ops; /* This should have no impact. */ + vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma); + vmg.prev = vma_prev; + vmg.middle = vma; + vma_set_dummy_anon_vma(vma, &avc); + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x6000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_EQ(vma->vm_start, 0x6000); + ASSERT_EQ(vma->vm_end, 0x7000); + ASSERT_EQ(vma->vm_pgoff, 6); + ASSERT_TRUE(vma_write_started(vma_prev)); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 2); + if (prev_is_sticky || middle_is_sticky) + ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); + + /* Clear down and reset. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * Merge left case - full span. + * + * <--> + * 0123456789 + * PPPVVVV + * -> + * 0123456789 + * PPPPPPP + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); + vma_prev->vm_ops = &vm_ops; /* This should have no impact. 
*/ + vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); + vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma); + vmg.prev = vma_prev; + vmg.middle = vma; + vma_set_dummy_anon_vma(vma, &avc); + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x7000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma_prev)); + ASSERT_EQ(mm.map_count, 1); + if (prev_is_sticky || middle_is_sticky) + ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); + + /* Clear down and reset. We should have deleted vma. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); + + /* + * Merge both case. + * + * <--> + * 0123456789 + * PPPVVVVNNN + * -> + * 0123456789 + * PPPPPPPPPP + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); + vma_prev->vm_ops = &vm_ops; /* This should have no impact. */ + vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags); + vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma); + vmg.prev = vma_prev; + vmg.middle = vma; + vma_set_dummy_anon_vma(vma, &avc); + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x9000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_write_started(vma_prev)); + ASSERT_EQ(mm.map_count, 1); + if (prev_is_sticky || middle_is_sticky || next_is_sticky) + ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); + + /* Clear down and reset. We should have deleted prev and next. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); + + /* + * Non-merge ranges. the modified VMA merge operation assumes that the + * caller always specifies ranges within the input VMA so we need only + * examine these cases. 
+ * + * - + * - + * - + * <-> + * <> + * <> + * 0123456789a + * PPPVVVVVNNN + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags); + + vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); + + ASSERT_EQ(cleanup_mm(&mm, &vmi), 3); + + return true; +} + +static bool test_merge_existing(void) +{ + int i, j, k; + + /* Generate every possible permutation of sticky flags. 
*/ + for (i = 0; i < 2; i++) + for (j = 0; j < 2; j++) + for (k = 0; k < 2; k++) + ASSERT_TRUE(__test_merge_existing(i, j, k)); + + return true; +} + +static bool test_anon_vma_non_mergeable(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vm_area_struct *vma, *vma_prev, *vma_next; + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + struct anon_vma_chain dummy_anon_vma_chain_1 = {}; + struct anon_vma_chain dummy_anon_vma_chain_2 = {}; + struct anon_vma dummy_anon_vma_2; + + /* + * In the case of modified VMA merge, merging both left and right VMAs + * but where prev and next have incompatible anon_vma objects, we revert + * to a merge of prev and VMA: + * + * <--> + * 0123456789 + * PPPVVVVNNN + * -> + * 0123456789 + * PPPPPPPNNN + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags); + + /* + * Give both prev and next single anon_vma_chain fields, so they will + * merge with the NULL vmg->anon_vma. + * + * However, when prev is compared to next, the merge should fail. + */ + vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL); + vmg.prev = vma_prev; + vmg.middle = vma; + vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1); + __vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2); + + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x7000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + ASSERT_TRUE(vma_write_started(vma_prev)); + ASSERT_FALSE(vma_write_started(vma_next)); + + /* Clear down and reset. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + /* + * Now consider the new VMA case. This is equivalent, only adding a new + * VMA in a gap between prev and next. 
+ * + * <--> + * 0123456789 + * PPP****NNN + * -> + * 0123456789 + * PPPPPPPNNN + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags); + + vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL); + vmg.prev = vma_prev; + vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1); + __vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2); + + vmg.anon_vma = NULL; + ASSERT_EQ(merge_new(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x7000); + ASSERT_EQ(vma_prev->vm_pgoff, 0); + ASSERT_TRUE(vma_write_started(vma_prev)); + ASSERT_FALSE(vma_write_started(vma_next)); + + /* Final cleanup. */ + ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); + + return true; +} + +static bool test_dup_anon_vma(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + struct anon_vma_chain dummy_anon_vma_chain = { + .anon_vma = &dummy_anon_vma, + }; + struct vm_area_struct *vma_prev, *vma_next, *vma; + + reset_dummy_anon_vma(); + + /* + * Expanding a VMA delete the next one duplicates next's anon_vma and + * assigns it to the expanded VMA. + * + * This covers new VMA merging, as these operations amount to a VMA + * expand. + */ + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next->anon_vma = &dummy_anon_vma; + + vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags); + vmg.target = vma_prev; + vmg.next = vma_next; + + ASSERT_EQ(expand_existing(&vmg), 0); + + /* Will have been cloned. */ + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_prev->anon_vma->was_cloned); + + /* Cleanup ready for next run. */ + cleanup_mm(&mm, &vmi); + + /* + * next has anon_vma, we assign to prev. 
+ * + * |<----->| + * |-------*********-------| + * prev vma next + * extend delete delete + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); + + /* Initialise avc so mergeability check passes. */ + INIT_LIST_HEAD(&vma_next->anon_vma_chain); + list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain); + + vma_next->anon_vma = &dummy_anon_vma; + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x8000); + + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_prev->anon_vma->was_cloned); + + cleanup_mm(&mm, &vmi); + + /* + * vma has anon_vma, we assign to prev. + * + * |<----->| + * |-------*********-------| + * prev vma next + * extend delete delete + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); + vmg.anon_vma = &dummy_anon_vma; + vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x8000); + + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_prev->anon_vma->was_cloned); + + cleanup_mm(&mm, &vmi); + + /* + * vma has anon_vma, we assign to prev. 
+ * + * |<----->| + * |-------************* + * prev vma + * extend shrink/delete + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags); + + vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + + ASSERT_EQ(vma_prev->vm_start, 0); + ASSERT_EQ(vma_prev->vm_end, 0x5000); + + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_prev->anon_vma->was_cloned); + + cleanup_mm(&mm, &vmi); + + /* + * vma has anon_vma, we assign to next. + * + * |<----->| + * *************-------| + * vma next + * shrink/delete extend + */ + + vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); + + vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma; + vmg.middle = vma; + + ASSERT_EQ(merge_existing(&vmg), vma_next); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + + ASSERT_EQ(vma_next->vm_start, 0x3000); + ASSERT_EQ(vma_next->vm_end, 0x8000); + + ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(vma_next->anon_vma->was_cloned); + + cleanup_mm(&mm, &vmi); + return true; +} + +static bool test_vmi_prealloc_fail(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vma_merge_struct vmg = { + .mm = &mm, + .vmi = &vmi, + }; + struct anon_vma_chain avc = {}; + struct vm_area_struct *vma_prev, *vma; + + /* + * We are merging vma into prev, with vma possessing an anon_vma, which + * will be duplicated. We cause the vmi preallocation to fail and assert + * the duplicated anon_vma is unlinked. 
+ */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma->anon_vma = &dummy_anon_vma; + + vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma); + vmg.prev = vma_prev; + vmg.middle = vma; + vma_set_dummy_anon_vma(vma, &avc); + + fail_prealloc = true; + + /* This will cause the merge to fail. */ + ASSERT_EQ(merge_existing(&vmg), NULL); + ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM); + /* We will already have assigned the anon_vma. */ + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + /* And it was both cloned and unlinked. */ + ASSERT_TRUE(dummy_anon_vma.was_cloned); + ASSERT_TRUE(dummy_anon_vma.was_unlinked); + + cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */ + + /* + * We repeat the same operation for expanding a VMA, which is what new + * VMA merging ultimately uses too. This asserts that unlinking is + * performed in this case too. + */ + + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma->anon_vma = &dummy_anon_vma; + + vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags); + vmg.target = vma_prev; + vmg.next = vma; + + fail_prealloc = true; + ASSERT_EQ(expand_existing(&vmg), -ENOMEM); + ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM); + + ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); + ASSERT_TRUE(dummy_anon_vma.was_cloned); + ASSERT_TRUE(dummy_anon_vma.was_unlinked); + + cleanup_mm(&mm, &vmi); + return true; +} + +static bool test_merge_extend(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0x1000); + struct vm_area_struct *vma; + + vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags); + alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); + + /* + * Extend a VMA into the gap between itself and the following VMA. + * This should result in a merge. 
+ * + * <-> + * * * + * + */ + + ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma); + ASSERT_EQ(vma->vm_start, 0); + ASSERT_EQ(vma->vm_end, 0x4000); + ASSERT_EQ(vma->vm_pgoff, 0); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(mm.map_count, 1); + + cleanup_mm(&mm, &vmi); + return true; +} + +static bool test_expand_only_mode(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + VMA_ITERATOR(vmi, &mm, 0); + struct vm_area_struct *vma_prev, *vma; + VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5); + + /* + * Place a VMA prior to the one we're expanding so we assert that we do + * not erroneously try to traverse to the previous VMA even though we + * have, through the use of the just_expand flag, indicated we do not + * need to do so. + */ + alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); + + /* + * We will be positioned at the prev VMA, but looking to expand to + * 0x9000. + */ + vma_iter_set(&vmi, 0x3000); + vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vmg.prev = vma_prev; + vmg.just_expand = true; + + vma = vma_merge_new_range(&vmg); + ASSERT_NE(vma, NULL); + ASSERT_EQ(vma, vma_prev); + ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); + ASSERT_EQ(vma->vm_start, 0x3000); + ASSERT_EQ(vma->vm_end, 0x9000); + ASSERT_EQ(vma->vm_pgoff, 3); + ASSERT_TRUE(vma_write_started(vma)); + ASSERT_EQ(vma_iter_addr(&vmi), 0x3000); + vma_assert_attached(vma); + + cleanup_mm(&mm, &vmi); + return true; +} + +static void run_merge_tests(int *num_tests, int *num_fail) +{ + /* Very simple tests to kick the tyres. 
*/ + TEST(simple_merge); + TEST(simple_modify); + TEST(simple_expand); + TEST(simple_shrink); + + TEST(merge_new); + TEST(vma_merge_special_flags); + TEST(vma_merge_with_close); + TEST(vma_merge_new_with_close); + TEST(merge_existing); + TEST(anon_vma_non_mergeable); + TEST(dup_anon_vma); + TEST(vmi_prealloc_fail); + TEST(merge_extend); + TEST(expand_only_mode); +} diff --git a/tools/testing/vma/tests/mmap.c b/tools/testing/vma/tests/mmap.c new file mode 100644 index 000000000000..bded4ecbe5db --- /dev/null +++ b/tools/testing/vma/tests/mmap.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +static bool test_mmap_region_basic(void) +{ + struct mm_struct mm = {}; + unsigned long addr; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, &mm, 0); + + current->mm = &mm; + + /* Map at 0x300000, length 0x3000. */ + addr = __mmap_region(NULL, 0x300000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x300, NULL); + ASSERT_EQ(addr, 0x300000); + + /* Map at 0x250000, length 0x3000. */ + addr = __mmap_region(NULL, 0x250000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x250, NULL); + ASSERT_EQ(addr, 0x250000); + + /* Map at 0x303000, merging to 0x300000 of length 0x6000. */ + addr = __mmap_region(NULL, 0x303000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x303, NULL); + ASSERT_EQ(addr, 0x303000); + + /* Map at 0x24d000, merging to 0x250000 of length 0x6000. 
*/ + addr = __mmap_region(NULL, 0x24d000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x24d, NULL); + ASSERT_EQ(addr, 0x24d000); + + ASSERT_EQ(mm.map_count, 2); + + for_each_vma(vmi, vma) { + if (vma->vm_start == 0x300000) { + ASSERT_EQ(vma->vm_end, 0x306000); + ASSERT_EQ(vma->vm_pgoff, 0x300); + } else if (vma->vm_start == 0x24d000) { + ASSERT_EQ(vma->vm_end, 0x253000); + ASSERT_EQ(vma->vm_pgoff, 0x24d); + } else { + ASSERT_FALSE(true); + } + } + + cleanup_mm(&mm, &vmi); + return true; +} + +static void run_mmap_tests(int *num_tests, int *num_fail) +{ + TEST(mmap_region_basic); +} diff --git a/tools/testing/vma/tests/vma.c b/tools/testing/vma/tests/vma.c new file mode 100644 index 000000000000..6d9775aee243 --- /dev/null +++ b/tools/testing/vma/tests/vma.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +static bool test_copy_vma(void) +{ + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; + struct mm_struct mm = {}; + bool need_locks = false; + VMA_ITERATOR(vmi, &mm, 0); + struct vm_area_struct *vma, *vma_new, *vma_next; + + /* Move backwards and do not merge. */ + + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); + vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); + ASSERT_NE(vma_new, vma); + ASSERT_EQ(vma_new->vm_start, 0); + ASSERT_EQ(vma_new->vm_end, 0x2000); + ASSERT_EQ(vma_new->vm_pgoff, 0); + vma_assert_attached(vma_new); + + cleanup_mm(&mm, &vmi); + + /* Move a VMA into position next to another and merge the two. 
*/ + + vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); + vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags); + vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); + vma_assert_attached(vma_new); + + ASSERT_EQ(vma_new, vma_next); + + cleanup_mm(&mm, &vmi); + return true; +} + +static void run_vma_tests(int *num_tests, int *num_fail) +{ + TEST(copy_vma); +} diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c deleted file mode 100644 index 93d21bc7e112..000000000000 --- a/tools/testing/vma/vma.c +++ /dev/null @@ -1,1785 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -#include -#include -#include - -#include "generated/bit-length.h" - -#include "maple-shared.h" -#include "vma_internal.h" - -/* Include so header guard set. */ -#include "../../../mm/vma.h" - -static bool fail_prealloc; - -/* Then override vma_iter_prealloc() so we can choose to fail it. */ -#define vma_iter_prealloc(vmi, vma) \ - (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL)) - -#define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536 - -unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; -unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; -unsigned long stack_guard_gap = 256UL<vm_start = start; - vma->vm_end = end; - vma->vm_pgoff = pgoff; - vm_flags_reset(vma, vm_flags); - vma_assert_detached(vma); - - return vma; -} - -/* Helper function to allocate a VMA and link it to the tree. */ -static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) -{ - int res; - - res = vma_link(mm, vma); - if (!res) - vma_assert_attached(vma); - return res; -} - -static void detach_free_vma(struct vm_area_struct *vma) -{ - vma_mark_detached(vma); - vm_area_free(vma); -} - -/* Helper function to allocate a VMA and link it to the tree. 
*/ -static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, - unsigned long start, - unsigned long end, - pgoff_t pgoff, - vm_flags_t vm_flags) -{ - struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags); - - if (vma == NULL) - return NULL; - - if (attach_vma(mm, vma)) { - detach_free_vma(vma); - return NULL; - } - - /* - * Reset this counter which we use to track whether writes have - * begun. Linking to the tree will have caused this to be incremented, - * which means we will get a false positive otherwise. - */ - vma->vm_lock_seq = UINT_MAX; - - return vma; -} - -/* Helper function which provides a wrapper around a merge new VMA operation. */ -static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) -{ - struct vm_area_struct *vma; - /* - * For convenience, get prev and next VMAs. Which the new VMA operation - * requires. - */ - vmg->next = vma_next(vmg->vmi); - vmg->prev = vma_prev(vmg->vmi); - vma_iter_next_range(vmg->vmi); - - vma = vma_merge_new_range(vmg); - if (vma) - vma_assert_attached(vma); - - return vma; -} - -/* - * Helper function which provides a wrapper around a merge existing VMA - * operation. - */ -static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) -{ - struct vm_area_struct *vma; - - vma = vma_merge_existing_range(vmg); - if (vma) - vma_assert_attached(vma); - return vma; -} - -/* - * Helper function which provides a wrapper around the expansion of an existing - * VMA. - */ -static int expand_existing(struct vma_merge_struct *vmg) -{ - return vma_expand(vmg); -} - -/* - * Helper function to reset merge state the associated VMA iterator to a - * specified new range. 
- */ -static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, - unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags) -{ - vma_iter_set(vmg->vmi, start); - - vmg->prev = NULL; - vmg->middle = NULL; - vmg->next = NULL; - vmg->target = NULL; - - vmg->start = start; - vmg->end = end; - vmg->pgoff = pgoff; - vmg->vm_flags = vm_flags; - - vmg->just_expand = false; - vmg->__remove_middle = false; - vmg->__remove_next = false; - vmg->__adjust_middle_start = false; - vmg->__adjust_next_start = false; -} - -/* Helper function to set both the VMG range and its anon_vma. */ -static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start, - unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, - struct anon_vma *anon_vma) -{ - vmg_set_range(vmg, start, end, pgoff, vm_flags); - vmg->anon_vma = anon_vma; -} - -/* - * Helper function to try to merge a new VMA. - * - * Update vmg and the iterator for it and try to merge, otherwise allocate a new - * VMA, link it to the maple tree and return it. - */ -static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm, - struct vma_merge_struct *vmg, - unsigned long start, unsigned long end, - pgoff_t pgoff, vm_flags_t vm_flags, - bool *was_merged) -{ - struct vm_area_struct *merged; - - vmg_set_range(vmg, start, end, pgoff, vm_flags); - - merged = merge_new(vmg); - if (merged) { - *was_merged = true; - ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS); - return merged; - } - - *was_merged = false; - - ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE); - - return alloc_and_link_vma(mm, start, end, pgoff, vm_flags); -} - -/* - * Helper function to reset the dummy anon_vma to indicate it has not been - * duplicated. - */ -static void reset_dummy_anon_vma(void) -{ - dummy_anon_vma.was_cloned = false; - dummy_anon_vma.was_unlinked = false; -} - -/* - * Helper function to remove all VMAs and destroy the maple tree associated with - * a virtual address space. Returns a count of VMAs in the tree. 
- */ -static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi) -{ - struct vm_area_struct *vma; - int count = 0; - - fail_prealloc = false; - reset_dummy_anon_vma(); - - vma_iter_set(vmi, 0); - for_each_vma(*vmi, vma) { - detach_free_vma(vma); - count++; - } - - mtree_destroy(&mm->mm_mt); - mm->map_count = 0; - return count; -} - -/* Helper function to determine if VMA has had vma_start_write() performed. */ -static bool vma_write_started(struct vm_area_struct *vma) -{ - int seq = vma->vm_lock_seq; - - /* We reset after each check. */ - vma->vm_lock_seq = UINT_MAX; - - /* The vma_start_write() stub simply increments this value. */ - return seq > -1; -} - -/* Helper function providing a dummy vm_ops->close() method.*/ -static void dummy_close(struct vm_area_struct *) -{ -} - -static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma, - struct anon_vma_chain *avc, - struct anon_vma *anon_vma) -{ - vma->anon_vma = anon_vma; - INIT_LIST_HEAD(&vma->anon_vma_chain); - list_add(&avc->same_vma, &vma->anon_vma_chain); - avc->anon_vma = vma->anon_vma; -} - -static void vma_set_dummy_anon_vma(struct vm_area_struct *vma, - struct anon_vma_chain *avc) -{ - __vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma); -} - -static bool test_simple_merge(void) -{ - struct vm_area_struct *vma; - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags); - struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags); - VMA_ITERATOR(vmi, &mm, 0x1000); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - .start = 0x1000, - .end = 0x2000, - .vm_flags = vm_flags, - .pgoff = 1, - }; - - ASSERT_FALSE(attach_vma(&mm, vma_left)); - ASSERT_FALSE(attach_vma(&mm, vma_right)); - - vma = merge_new(&vmg); - ASSERT_NE(vma, NULL); - - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x3000); - ASSERT_EQ(vma->vm_pgoff, 0); - 
ASSERT_EQ(vma->vm_flags, vm_flags); - - detach_free_vma(vma); - mtree_destroy(&mm.mm_mt); - - return true; -} - -static bool test_simple_modify(void) -{ - struct vm_area_struct *vma; - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags); - VMA_ITERATOR(vmi, &mm, 0x1000); - vm_flags_t flags = VM_READ | VM_MAYREAD; - - ASSERT_FALSE(attach_vma(&mm, init_vma)); - - /* - * The flags will not be changed, the vma_modify_flags() function - * performs the merge/split only. - */ - vma = vma_modify_flags(&vmi, init_vma, init_vma, - 0x1000, 0x2000, &flags); - ASSERT_NE(vma, NULL); - /* We modify the provided VMA, and on split allocate new VMAs. */ - ASSERT_EQ(vma, init_vma); - - ASSERT_EQ(vma->vm_start, 0x1000); - ASSERT_EQ(vma->vm_end, 0x2000); - ASSERT_EQ(vma->vm_pgoff, 1); - - /* - * Now walk through the three split VMAs and make sure they are as - * expected. - */ - - vma_iter_set(&vmi, 0); - vma = vma_iter_load(&vmi); - - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x1000); - ASSERT_EQ(vma->vm_pgoff, 0); - - detach_free_vma(vma); - vma_iter_clear(&vmi); - - vma = vma_next(&vmi); - - ASSERT_EQ(vma->vm_start, 0x1000); - ASSERT_EQ(vma->vm_end, 0x2000); - ASSERT_EQ(vma->vm_pgoff, 1); - - detach_free_vma(vma); - vma_iter_clear(&vmi); - - vma = vma_next(&vmi); - - ASSERT_EQ(vma->vm_start, 0x2000); - ASSERT_EQ(vma->vm_end, 0x3000); - ASSERT_EQ(vma->vm_pgoff, 2); - - detach_free_vma(vma); - mtree_destroy(&mm.mm_mt); - - return true; -} - -static bool test_simple_expand(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags); - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .vmi = &vmi, - .target = vma, - .start = 0, - .end = 0x3000, - .pgoff = 0, - }; - - ASSERT_FALSE(attach_vma(&mm, vma)); - - 
ASSERT_FALSE(expand_existing(&vmg)); - - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x3000); - ASSERT_EQ(vma->vm_pgoff, 0); - - detach_free_vma(vma); - mtree_destroy(&mm.mm_mt); - - return true; -} - -static bool test_simple_shrink(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags); - VMA_ITERATOR(vmi, &mm, 0); - - ASSERT_FALSE(attach_vma(&mm, vma)); - - ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0)); - - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x1000); - ASSERT_EQ(vma->vm_pgoff, 0); - - detach_free_vma(vma); - mtree_destroy(&mm.mm_mt); - - return true; -} - -static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - struct anon_vma_chain dummy_anon_vma_chain_a = { - .anon_vma = &dummy_anon_vma, - }; - struct anon_vma_chain dummy_anon_vma_chain_b = { - .anon_vma = &dummy_anon_vma, - }; - struct anon_vma_chain dummy_anon_vma_chain_c = { - .anon_vma = &dummy_anon_vma, - }; - struct anon_vma_chain dummy_anon_vma_chain_d = { - .anon_vma = &dummy_anon_vma, - }; - const struct vm_operations_struct vm_ops = { - .close = dummy_close, - }; - int count; - struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d; - bool merged; - - if (is_sticky) - vm_flags |= VM_STICKY; - - /* - * 0123456789abc - * AA B CC - */ - vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); - ASSERT_NE(vma_a, NULL); - if (a_is_sticky) - vm_flags_set(vma_a, VM_STICKY); - /* We give each VMA a single avc so we can test anon_vma duplication. 
*/ - INIT_LIST_HEAD(&vma_a->anon_vma_chain); - list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain); - - vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); - ASSERT_NE(vma_b, NULL); - if (b_is_sticky) - vm_flags_set(vma_b, VM_STICKY); - INIT_LIST_HEAD(&vma_b->anon_vma_chain); - list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain); - - vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags); - ASSERT_NE(vma_c, NULL); - if (c_is_sticky) - vm_flags_set(vma_c, VM_STICKY); - INIT_LIST_HEAD(&vma_c->anon_vma_chain); - list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain); - - /* - * NO merge. - * - * 0123456789abc - * AA B ** CC - */ - vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged); - ASSERT_NE(vma_d, NULL); - INIT_LIST_HEAD(&vma_d->anon_vma_chain); - list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain); - ASSERT_FALSE(merged); - ASSERT_EQ(mm.map_count, 4); - - /* - * Merge BOTH sides. - * - * 0123456789abc - * AA*B DD CC - */ - vma_a->vm_ops = &vm_ops; /* This should have no impact. */ - vma_b->anon_vma = &dummy_anon_vma; - vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged); - ASSERT_EQ(vma, vma_a); - /* Merge with A, delete B. */ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x4000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 3); - if (is_sticky || a_is_sticky || b_is_sticky) - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Merge to PREVIOUS VMA. - * - * 0123456789abc - * AAAA* DD CC - */ - vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged); - ASSERT_EQ(vma, vma_a); - /* Extend A. 
*/ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x5000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 3); - if (is_sticky || a_is_sticky) - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Merge to NEXT VMA. - * - * 0123456789abc - * AAAAA *DD CC - */ - vma_d->anon_vma = &dummy_anon_vma; - vma_d->vm_ops = &vm_ops; /* This should have no impact. */ - vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged); - ASSERT_EQ(vma, vma_d); - /* Prepend. */ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0x6000); - ASSERT_EQ(vma->vm_end, 0x9000); - ASSERT_EQ(vma->vm_pgoff, 6); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 3); - if (is_sticky) /* D uses is_sticky. */ - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Merge BOTH sides. - * - * 0123456789abc - * AAAAA*DDD CC - */ - vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */ - vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged); - ASSERT_EQ(vma, vma_a); - /* Merge with A, delete D. */ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x9000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 2); - if (is_sticky || a_is_sticky) - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Merge to NEXT VMA. - * - * 0123456789abc - * AAAAAAAAA *CC - */ - vma_c->anon_vma = &dummy_anon_vma; - vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged); - ASSERT_EQ(vma, vma_c); - /* Prepend C. 
*/ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0xa000); - ASSERT_EQ(vma->vm_end, 0xc000); - ASSERT_EQ(vma->vm_pgoff, 0xa); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 2); - if (is_sticky || c_is_sticky) - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Merge BOTH sides. - * - * 0123456789abc - * AAAAAAAAA*CCC - */ - vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged); - ASSERT_EQ(vma, vma_a); - /* Extend A and delete C. */ - ASSERT_TRUE(merged); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0xc000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 1); - if (is_sticky || a_is_sticky || c_is_sticky) - ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY)); - - /* - * Final state. - * - * 0123456789abc - * AAAAAAAAAAAAA - */ - - count = 0; - vma_iter_set(&vmi, 0); - for_each_vma(vmi, vma) { - ASSERT_NE(vma, NULL); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0xc000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->anon_vma, &dummy_anon_vma); - - detach_free_vma(vma); - count++; - } - - /* Should only have one VMA left (though freed) after all is done.*/ - ASSERT_EQ(count, 1); - - mtree_destroy(&mm.mm_mt); - return true; -} - -static bool test_merge_new(void) -{ - int i, j, k, l; - - /* Generate every possible permutation of sticky flags. 
*/ - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) - for (k = 0; k < 2; k++) - for (l = 0; l < 2; l++) - ASSERT_TRUE(__test_merge_new(i, j, k, l)); - - return true; -} - -static bool test_vma_merge_special_flags(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP }; - vm_flags_t all_special_flags = 0; - int i; - struct vm_area_struct *vma_left, *vma; - - /* Make sure there aren't new VM_SPECIAL flags. */ - for (i = 0; i < ARRAY_SIZE(special_flags); i++) { - all_special_flags |= special_flags[i]; - } - ASSERT_EQ(all_special_flags, VM_SPECIAL); - - /* - * 01234 - * AAA - */ - vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - ASSERT_NE(vma_left, NULL); - - /* 1. Set up new VMA with special flag that would otherwise merge. */ - - /* - * 01234 - * AAA* - * - * This should merge if not for the VM_SPECIAL flag. - */ - vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags); - for (i = 0; i < ARRAY_SIZE(special_flags); i++) { - vm_flags_t special_flag = special_flags[i]; - - vm_flags_reset(vma_left, vm_flags | special_flag); - vmg.vm_flags = vm_flags | special_flag; - vma = merge_new(&vmg); - ASSERT_EQ(vma, NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - } - - /* 2. Modify VMA with special flag that would otherwise merge. */ - - /* - * 01234 - * AAAB - * - * Create a VMA to modify. 
- */ - vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); - ASSERT_NE(vma, NULL); - vmg.middle = vma; - - for (i = 0; i < ARRAY_SIZE(special_flags); i++) { - vm_flags_t special_flag = special_flags[i]; - - vm_flags_reset(vma_left, vm_flags | special_flag); - vmg.vm_flags = vm_flags | special_flag; - vma = merge_existing(&vmg); - ASSERT_EQ(vma, NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - } - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_vma_merge_with_close(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - const struct vm_operations_struct vm_ops = { - .close = dummy_close, - }; - struct vm_area_struct *vma_prev, *vma_next, *vma; - - /* - * When merging VMAs we are not permitted to remove any VMA that has a - * vm_ops->close() hook. - * - * Considering the two possible adjacent VMAs to which a VMA can be - * merged: - * - * [ prev ][ vma ][ next ] - * - * In no case will we need to delete prev. If the operation is - * mergeable, then prev will be extended with one or both of vma and - * next deleted. - * - * As a result, during initial mergeability checks, only - * can_vma_merge_before() (which implies the VMA being merged with is - * 'next' as shown above) bothers to check to see whether the next VMA - * has a vm_ops->close() callback that will need to be called when - * removed. - * - * If it does, then we cannot merge as the resources that the close() - * operation potentially clears down are tied only to the existing VMA - * range and we have no way of extending those to the nearly merged one. - * - * We must consider two scenarios: - * - * A. - * - * vm_ops->close: - - !NULL - * [ prev ][ vma ][ next ] - * - * Where prev may or may not be present/mergeable. - * - * This is picked up by a specific check in can_vma_merge_before(). - * - * B. 
- * - * vm_ops->close: - !NULL - * [ prev ][ vma ] - * - * Where prev and vma are present and mergeable. - * - * This is picked up by a specific check in the modified VMA merge. - * - * IMPORTANT NOTE: We make the assumption that the following case: - * - * - !NULL NULL - * [ prev ][ vma ][ next ] - * - * Cannot occur, because vma->vm_ops being the same implies the same - * vma->vm_file, and therefore this would mean that next->vm_ops->close - * would be set too, and thus scenario A would pick this up. - */ - - /* - * The only case of a new VMA merge that results in a VMA being deleted - * is one where both the previous and next VMAs are merged - in this - * instance the next VMA is deleted, and the previous VMA is extended. - * - * If we are unable to do so, we reduce the operation to simply - * extending the prev VMA and not merging next. - * - * 0123456789 - * PPP**NNNN - * -> - * 0123456789 - * PPPPPPNNN - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); - vma_next->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - ASSERT_EQ(merge_new(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x5000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * When modifying an existing VMA there are further cases where we - * delete VMAs. - * - * <> - * 0123456789 - * PPPVV - * - * In this instance, if vma has a close hook, the merge simply cannot - * proceed. - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - /* - * The VMA being modified in a way that would otherwise merge should - * also fail. 
- */ - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * This case is mirrored if merging with next. - * - * <> - * 0123456789 - * VVNNNN - * - * In this instance, if vma has a close hook, the merge simply cannot - * proceed. - */ - - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); - vma->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - /* - * Initially this is misapprehended as an out of memory report, as the - * close() check is handled in the same way as anon_vma duplication - * failures, however a subsequent patch resolves this. - */ - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * Finally, we consider two variants of the case where we modify a VMA - * to merge with both the previous and next VMAs. - * - * The first variant is where vma has a close hook. In this instance, no - * merge can proceed. - * - * <> - * 0123456789 - * PPPVVNNNN - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); - vma->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 3); - - /* - * The second variant is where next has a close hook. In this instance, - * we reduce the operation to a merge between prev and vma. 
- * - * <> - * 0123456789 - * PPPVVNNNN - * -> - * 0123456789 - * PPPPPNNNN - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags); - vma_next->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x5000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - return true; -} - -static bool test_vma_merge_new_with_close(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); - struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags); - const struct vm_operations_struct vm_ops = { - .close = dummy_close, - }; - struct vm_area_struct *vma; - - /* - * We should allow the partial merge of a proposed new VMA if the - * surrounding VMAs have vm_ops->close() hooks (but are otherwise - * compatible), e.g.: - * - * New VMA - * A v-------v B - * |-----| |-----| - * close close - * - * Since the rule is to not DELETE a VMA with a close operation, this - * should be permitted, only rather than expanding A and deleting B, we - * should simply expand A and leave B intact, e.g.: - * - * New VMA - * A B - * |------------||-----| - * close close - */ - - /* Have prev and next have a vm_ops->close() hook. 
*/ - vma_prev->vm_ops = &vm_ops; - vma_next->vm_ops = &vm_ops; - - vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags); - vma = merge_new(&vmg); - ASSERT_NE(vma, NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x5000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_EQ(vma->vm_ops, &vm_ops); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 2); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - vm_flags_t prev_flags = vm_flags; - vm_flags_t next_flags = vm_flags; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vm_area_struct *vma, *vma_prev, *vma_next; - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - const struct vm_operations_struct vm_ops = { - .close = dummy_close, - }; - struct anon_vma_chain avc = {}; - - if (prev_is_sticky) - prev_flags |= VM_STICKY; - if (middle_is_sticky) - vm_flags |= VM_STICKY; - if (next_is_sticky) - next_flags |= VM_STICKY; - - /* - * Merge right case - partial span. - * - * <-> - * 0123456789 - * VVVVNNN - * -> - * 0123456789 - * VNNNNNN - */ - vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags); - vma->vm_ops = &vm_ops; /* This should have no impact. */ - vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags); - vma_next->vm_ops = &vm_ops; /* This should have no impact. 
*/ - vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma); - vmg.middle = vma; - vmg.prev = vma; - vma_set_dummy_anon_vma(vma, &avc); - ASSERT_EQ(merge_existing(&vmg), vma_next); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_next->vm_start, 0x3000); - ASSERT_EQ(vma_next->vm_end, 0x9000); - ASSERT_EQ(vma_next->vm_pgoff, 3); - ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); - ASSERT_EQ(vma->vm_start, 0x2000); - ASSERT_EQ(vma->vm_end, 0x3000); - ASSERT_EQ(vma->vm_pgoff, 2); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_TRUE(vma_write_started(vma_next)); - ASSERT_EQ(mm.map_count, 2); - if (middle_is_sticky || next_is_sticky) - ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY)); - - /* Clear down and reset. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * Merge right case - full span. - * - * <--> - * 0123456789 - * VVVVNNN - * -> - * 0123456789 - * NNNNNNN - */ - vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags); - vma_next->vm_ops = &vm_ops; /* This should have no impact. */ - vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma); - vmg.middle = vma; - vma_set_dummy_anon_vma(vma, &avc); - ASSERT_EQ(merge_existing(&vmg), vma_next); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_next->vm_start, 0x2000); - ASSERT_EQ(vma_next->vm_end, 0x9000); - ASSERT_EQ(vma_next->vm_pgoff, 2); - ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma_next)); - ASSERT_EQ(mm.map_count, 1); - if (middle_is_sticky || next_is_sticky) - ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY)); - - /* Clear down and reset. We should have deleted vma. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); - - /* - * Merge left case - partial span. 
- * - * <-> - * 0123456789 - * PPPVVVV - * -> - * 0123456789 - * PPPPPPV - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); - vma_prev->vm_ops = &vm_ops; /* This should have no impact. */ - vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); - vma->vm_ops = &vm_ops; /* This should have no impact. */ - vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma); - vmg.prev = vma_prev; - vmg.middle = vma; - vma_set_dummy_anon_vma(vma, &avc); - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x6000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_EQ(vma->vm_start, 0x6000); - ASSERT_EQ(vma->vm_end, 0x7000); - ASSERT_EQ(vma->vm_pgoff, 6); - ASSERT_TRUE(vma_write_started(vma_prev)); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 2); - if (prev_is_sticky || middle_is_sticky) - ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); - - /* Clear down and reset. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * Merge left case - full span. - * - * <--> - * 0123456789 - * PPPVVVV - * -> - * 0123456789 - * PPPPPPP - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); - vma_prev->vm_ops = &vm_ops; /* This should have no impact. 
*/ - vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); - vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma); - vmg.prev = vma_prev; - vmg.middle = vma; - vma_set_dummy_anon_vma(vma, &avc); - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x7000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma_prev)); - ASSERT_EQ(mm.map_count, 1); - if (prev_is_sticky || middle_is_sticky) - ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); - - /* Clear down and reset. We should have deleted vma. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); - - /* - * Merge both case. - * - * <--> - * 0123456789 - * PPPVVVVNNN - * -> - * 0123456789 - * PPPPPPPPPP - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); - vma_prev->vm_ops = &vm_ops; /* This should have no impact. */ - vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags); - vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma); - vmg.prev = vma_prev; - vmg.middle = vma; - vma_set_dummy_anon_vma(vma, &avc); - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x9000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_write_started(vma_prev)); - ASSERT_EQ(mm.map_count, 1); - if (prev_is_sticky || middle_is_sticky || next_is_sticky) - ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY)); - - /* Clear down and reset. We should have deleted prev and next. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 1); - - /* - * Non-merge ranges. the modified VMA merge operation assumes that the - * caller always specifies ranges within the input VMA so we need only - * examine these cases. 
- * - * - - * - - * - - * <-> - * <> - * <> - * 0123456789a - * PPPVVVVVNNN - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags); - - vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); - - ASSERT_EQ(cleanup_mm(&mm, &vmi), 3); - - return true; -} - -static bool test_merge_existing(void) -{ - int i, j, k; - - /* Generate every possible permutation of sticky flags. 
*/ - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) - for (k = 0; k < 2; k++) - ASSERT_TRUE(__test_merge_existing(i, j, k)); - - return true; -} - -static bool test_anon_vma_non_mergeable(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vm_area_struct *vma, *vma_prev, *vma_next; - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - struct anon_vma_chain dummy_anon_vma_chain_1 = {}; - struct anon_vma_chain dummy_anon_vma_chain_2 = {}; - struct anon_vma dummy_anon_vma_2; - - /* - * In the case of modified VMA merge, merging both left and right VMAs - * but where prev and next have incompatible anon_vma objects, we revert - * to a merge of prev and VMA: - * - * <--> - * 0123456789 - * PPPVVVVNNN - * -> - * 0123456789 - * PPPPPPPNNN - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags); - - /* - * Give both prev and next single anon_vma_chain fields, so they will - * merge with the NULL vmg->anon_vma. - * - * However, when prev is compared to next, the merge should fail. - */ - vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL); - vmg.prev = vma_prev; - vmg.middle = vma; - vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1); - __vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2); - - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x7000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - ASSERT_TRUE(vma_write_started(vma_prev)); - ASSERT_FALSE(vma_write_started(vma_next)); - - /* Clear down and reset. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - /* - * Now consider the new VMA case. This is equivalent, only adding a new - * VMA in a gap between prev and next. 
- * - * <--> - * 0123456789 - * PPP****NNN - * -> - * 0123456789 - * PPPPPPPNNN - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags); - - vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL); - vmg.prev = vma_prev; - vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1); - __vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2); - - vmg.anon_vma = NULL; - ASSERT_EQ(merge_new(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x7000); - ASSERT_EQ(vma_prev->vm_pgoff, 0); - ASSERT_TRUE(vma_write_started(vma_prev)); - ASSERT_FALSE(vma_write_started(vma_next)); - - /* Final cleanup. */ - ASSERT_EQ(cleanup_mm(&mm, &vmi), 2); - - return true; -} - -static bool test_dup_anon_vma(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - struct anon_vma_chain dummy_anon_vma_chain = { - .anon_vma = &dummy_anon_vma, - }; - struct vm_area_struct *vma_prev, *vma_next, *vma; - - reset_dummy_anon_vma(); - - /* - * Expanding a VMA delete the next one duplicates next's anon_vma and - * assigns it to the expanded VMA. - * - * This covers new VMA merging, as these operations amount to a VMA - * expand. - */ - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next->anon_vma = &dummy_anon_vma; - - vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags); - vmg.target = vma_prev; - vmg.next = vma_next; - - ASSERT_EQ(expand_existing(&vmg), 0); - - /* Will have been cloned. */ - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_prev->anon_vma->was_cloned); - - /* Cleanup ready for next run. */ - cleanup_mm(&mm, &vmi); - - /* - * next has anon_vma, we assign to prev. 
- * - * |<----->| - * |-------*********-------| - * prev vma next - * extend delete delete - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); - - /* Initialise avc so mergeability check passes. */ - INIT_LIST_HEAD(&vma_next->anon_vma_chain); - list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain); - - vma_next->anon_vma = &dummy_anon_vma; - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x8000); - - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_prev->anon_vma->was_cloned); - - cleanup_mm(&mm, &vmi); - - /* - * vma has anon_vma, we assign to prev. - * - * |<----->| - * |-------*********-------| - * prev vma next - * extend delete delete - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); - vmg.anon_vma = &dummy_anon_vma; - vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x8000); - - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_prev->anon_vma->was_cloned); - - cleanup_mm(&mm, &vmi); - - /* - * vma has anon_vma, we assign to prev. 
- * - * |<----->| - * |-------************* - * prev vma - * extend shrink/delete - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags); - - vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - - ASSERT_EQ(vma_prev->vm_start, 0); - ASSERT_EQ(vma_prev->vm_end, 0x5000); - - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_prev->anon_vma->was_cloned); - - cleanup_mm(&mm, &vmi); - - /* - * vma has anon_vma, we assign to next. - * - * |<----->| - * *************-------| - * vma next - * shrink/delete extend - */ - - vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); - - vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); - vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma; - vmg.middle = vma; - - ASSERT_EQ(merge_existing(&vmg), vma_next); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - - ASSERT_EQ(vma_next->vm_start, 0x3000); - ASSERT_EQ(vma_next->vm_end, 0x8000); - - ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(vma_next->anon_vma->was_cloned); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_vmi_prealloc_fail(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vma_merge_struct vmg = { - .mm = &mm, - .vmi = &vmi, - }; - struct anon_vma_chain avc = {}; - struct vm_area_struct *vma_prev, *vma; - - /* - * We are merging vma into prev, with vma possessing an anon_vma, which - * will be duplicated. We cause the vmi preallocation to fail and assert - * the duplicated anon_vma is unlinked. 
- */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma->anon_vma = &dummy_anon_vma; - - vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma); - vmg.prev = vma_prev; - vmg.middle = vma; - vma_set_dummy_anon_vma(vma, &avc); - - fail_prealloc = true; - - /* This will cause the merge to fail. */ - ASSERT_EQ(merge_existing(&vmg), NULL); - ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM); - /* We will already have assigned the anon_vma. */ - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - /* And it was both cloned and unlinked. */ - ASSERT_TRUE(dummy_anon_vma.was_cloned); - ASSERT_TRUE(dummy_anon_vma.was_unlinked); - - cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */ - - /* - * We repeat the same operation for expanding a VMA, which is what new - * VMA merging ultimately uses too. This asserts that unlinking is - * performed in this case too. - */ - - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma->anon_vma = &dummy_anon_vma; - - vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags); - vmg.target = vma_prev; - vmg.next = vma; - - fail_prealloc = true; - ASSERT_EQ(expand_existing(&vmg), -ENOMEM); - ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM); - - ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); - ASSERT_TRUE(dummy_anon_vma.was_cloned); - ASSERT_TRUE(dummy_anon_vma.was_unlinked); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_merge_extend(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0x1000); - struct vm_area_struct *vma; - - vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags); - alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); - - /* - * Extend a VMA into the gap between itself and the following VMA. - * This should result in a merge. 
- * - * <-> - * * * - * - */ - - ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma); - ASSERT_EQ(vma->vm_start, 0); - ASSERT_EQ(vma->vm_end, 0x4000); - ASSERT_EQ(vma->vm_pgoff, 0); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(mm.map_count, 1); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_copy_vma(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - bool need_locks = false; - VMA_ITERATOR(vmi, &mm, 0); - struct vm_area_struct *vma, *vma_new, *vma_next; - - /* Move backwards and do not merge. */ - - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); - ASSERT_NE(vma_new, vma); - ASSERT_EQ(vma_new->vm_start, 0); - ASSERT_EQ(vma_new->vm_end, 0x2000); - ASSERT_EQ(vma_new->vm_pgoff, 0); - vma_assert_attached(vma_new); - - cleanup_mm(&mm, &vmi); - - /* Move a VMA into position next to another and merge the two. */ - - vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); - vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags); - vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); - vma_assert_attached(vma_new); - - ASSERT_EQ(vma_new, vma_next); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_expand_only_mode(void) -{ - vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; - struct mm_struct mm = {}; - VMA_ITERATOR(vmi, &mm, 0); - struct vm_area_struct *vma_prev, *vma; - VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5); - - /* - * Place a VMA prior to the one we're expanding so we assert that we do - * not erroneously try to traverse to the previous VMA even though we - * have, through the use of the just_expand flag, indicated we do not - * need to do so. - */ - alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); - - /* - * We will be positioned at the prev VMA, but looking to expand to - * 0x9000. 
- */ - vma_iter_set(&vmi, 0x3000); - vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); - vmg.prev = vma_prev; - vmg.just_expand = true; - - vma = vma_merge_new_range(&vmg); - ASSERT_NE(vma, NULL); - ASSERT_EQ(vma, vma_prev); - ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); - ASSERT_EQ(vma->vm_start, 0x3000); - ASSERT_EQ(vma->vm_end, 0x9000); - ASSERT_EQ(vma->vm_pgoff, 3); - ASSERT_TRUE(vma_write_started(vma)); - ASSERT_EQ(vma_iter_addr(&vmi), 0x3000); - vma_assert_attached(vma); - - cleanup_mm(&mm, &vmi); - return true; -} - -static bool test_mmap_region_basic(void) -{ - struct mm_struct mm = {}; - unsigned long addr; - struct vm_area_struct *vma; - VMA_ITERATOR(vmi, &mm, 0); - - current->mm = &mm; - - /* Map at 0x300000, length 0x3000. */ - addr = __mmap_region(NULL, 0x300000, 0x3000, - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, - 0x300, NULL); - ASSERT_EQ(addr, 0x300000); - - /* Map at 0x250000, length 0x3000. */ - addr = __mmap_region(NULL, 0x250000, 0x3000, - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, - 0x250, NULL); - ASSERT_EQ(addr, 0x250000); - - /* Map at 0x303000, merging to 0x300000 of length 0x6000. */ - addr = __mmap_region(NULL, 0x303000, 0x3000, - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, - 0x303, NULL); - ASSERT_EQ(addr, 0x303000); - - /* Map at 0x24d000, merging to 0x250000 of length 0x6000. 
*/ - addr = __mmap_region(NULL, 0x24d000, 0x3000, - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, - 0x24d, NULL); - ASSERT_EQ(addr, 0x24d000); - - ASSERT_EQ(mm.map_count, 2); - - for_each_vma(vmi, vma) { - if (vma->vm_start == 0x300000) { - ASSERT_EQ(vma->vm_end, 0x306000); - ASSERT_EQ(vma->vm_pgoff, 0x300); - } else if (vma->vm_start == 0x24d000) { - ASSERT_EQ(vma->vm_end, 0x253000); - ASSERT_EQ(vma->vm_pgoff, 0x24d); - } else { - ASSERT_FALSE(true); - } - } - - cleanup_mm(&mm, &vmi); - return true; -} - -int main(void) -{ - int num_tests = 0, num_fail = 0; - - maple_tree_init(); - vma_state_init(); - -#define TEST(name) \ - do { \ - num_tests++; \ - if (!test_##name()) { \ - num_fail++; \ - fprintf(stderr, "Test " #name " FAILED\n"); \ - } \ - } while (0) - - /* Very simple tests to kick the tyres. */ - TEST(simple_merge); - TEST(simple_modify); - TEST(simple_expand); - TEST(simple_shrink); - - TEST(merge_new); - TEST(vma_merge_special_flags); - TEST(vma_merge_with_close); - TEST(vma_merge_new_with_close); - TEST(merge_existing); - TEST(anon_vma_non_mergeable); - TEST(dup_anon_vma); - TEST(vmi_prealloc_fail); - TEST(merge_extend); - TEST(copy_vma); - TEST(expand_only_mode); - - TEST(mmap_region_basic); - -#undef TEST - - printf("%d tests run, %d passed, %d failed.\n", - num_tests, num_tests - num_fail, num_fail); - - return num_fail == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; -} diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 2743f12ecf32..b48ebae3927d 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -1127,15 +1127,6 @@ static inline void mapping_allow_writable(struct address_space *mapping) atomic_inc(&mapping->i_mmap_writable); } -static inline void vma_set_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end, - pgoff_t pgoff) -{ - vma->vm_start = start; - vma->vm_end = end; - vma->vm_pgoff = pgoff; -} - static inline struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) { -- cgit v1.2.3