-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c    | 13
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h  |  8
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c      | 37
3 files changed, 51 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index e0f3337dfccb..edc5c071bf02 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -178,12 +178,11 @@ static void *vcpu_worker(void *data)
return NULL;
}

-static void vm_dirty_log_verify(unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
+ uint64_t step = vm_num_host_pages(mode, 1);
uint64_t page;
uint64_t *value_ptr;
- uint64_t step = host_page_size >= guest_page_size ? 1 :
- guest_page_size / host_page_size;

for (page = 0; page < host_num_pages; page += step) {
value_ptr = host_test_mem + page * host_page_size;
@@ -289,14 +288,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
* case where the size is not aligned to 64 pages.
*/
guest_num_pages = (1ul << (DIRTY_MEM_BITS -
- vm_get_page_shift(vm))) + 16;
+ vm_get_page_shift(vm))) + 3;
+ guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
#ifdef __s390x__
/* Round up to multiple of 1M (segment size) */
guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
#endif
host_page_size = getpagesize();
- host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
- !!((guest_num_pages * guest_page_size) % host_page_size);
+ host_num_pages = vm_num_host_pages(mode, guest_num_pages);

if (!phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
@@ -367,7 +366,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
host_num_pages);
#endif
- vm_dirty_log_verify(bmap);
+ vm_dirty_log_verify(mode, bmap);
iteration++;
sync_global_to_guest(vm, iteration);
}
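For reference, the new verification stride comes straight from the page-size ratio: vm_num_host_pages(mode, 1) is the number of host pages covered by one guest page, so the loop walks the host-page-granular state in guest-page-sized strides, replacing the open-coded guest_page_size / host_page_size computation. Below is a minimal standalone sketch of that stride computation; it is not part of the patch, and the shifts 16 and 12 are example values for 64K guest pages on a 4K host.

/*
 * Standalone sketch, not part of the patch: compute the verification
 * stride the way vm_num_host_pages(mode, 1) does, for hypothetical
 * guest/host page shifts.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t verify_step(unsigned int guest_page_shift,
			    unsigned int host_page_shift)
{
	/* One guest page spans this many host pages (at least one). */
	if (guest_page_shift >= host_page_shift)
		return 1ULL << (guest_page_shift - host_page_shift);
	return 1;
}

int main(void)
{
	assert(verify_step(16, 12) == 16);	/* 64K guest pages, 4K host pages */
	assert(verify_step(12, 12) == 1);	/* matching page sizes */
	assert(verify_step(12, 16) == 1);	/* 4K guest pages, 64K host pages */
	return 0;
}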
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index ae0d14c2540a..1dc13bfa88b7 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -164,6 +164,14 @@ unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
unsigned int vm_get_max_gfn(struct kvm_vm *vm);

+unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
+unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+static inline unsigned int
+vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+ return vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
+}
+
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
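The helpers declared above convert page counts between guest and host granularity, and vm_adjust_num_guest_pages() composes them: convert to host pages rounding up, then back to guest pages, which rounds a guest page count up to one that maps onto a whole number of host pages. The following standalone sketch mirrors that round-trip; it is not part of the patch and uses hard-coded example shifts instead of the library's vm_guest_mode_params table.

/*
 * Standalone sketch, not part of the patch: the round-trip behind
 * vm_adjust_num_guest_pages() with hard-coded example shifts.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t adjust_num_guest_pages(uint64_t n, unsigned int guest_shift,
				       unsigned int host_shift)
{
	uint64_t per_host;

	/* Guest pages at least as large as host pages already map cleanly. */
	if (guest_shift >= host_shift)
		return n;

	/* Round up to a whole number of host pages, then convert back. */
	per_host = 1ULL << (host_shift - guest_shift);
	return (n / per_host + !!(n % per_host)) * per_host;
}

int main(void)
{
	assert(adjust_num_guest_pages(19, 12, 14) == 20);	/* 4K guest, 16K host: round up to a multiple of 4 */
	assert(adjust_num_guest_pages(20, 12, 14) == 20);	/* already aligned */
	assert(adjust_num_guest_pages(19, 16, 12) == 19);	/* 64K guest, 4K host: unchanged */
	return 0;
}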
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1b133583d6c7..67f5dc9a6a32 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -580,6 +580,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
size_t alignment;

+ TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
+ "Number of guest pages is not compatible with the host. "
+ "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
+
TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
"address not on a page boundary.\n"
" guest_paddr: 0x%lx vm->page_size: 0x%x",
@@ -1701,3 +1705,36 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
}
+
+static unsigned int vm_calc_num_pages(unsigned int num_pages,
+ unsigned int page_shift,
+ unsigned int new_page_shift,
+ bool ceil)
+{
+ unsigned int n = 1 << (new_page_shift - page_shift);
+
+ if (page_shift >= new_page_shift)
+ return num_pages * (1 << (page_shift - new_page_shift));
+
+ return num_pages / n + !!(ceil && num_pages % n);
+}
+
+static inline int getpageshift(void)
+{
+ return __builtin_ffs(getpagesize()) - 1;
+}
+
+unsigned int
+vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+ return vm_calc_num_pages(num_guest_pages,
+ vm_guest_mode_params[mode].page_shift,
+ getpageshift(), true);
+}
+
+unsigned int
+vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
+{
+ return vm_calc_num_pages(num_host_pages, getpageshift(),
+ vm_guest_mode_params[mode].page_shift, false);
+}
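The getpageshift() helper added above relies on the host page size being a power of two, so the index of the lowest set bit of getpagesize() is the page shift (for example, 12 for 4K pages). A small standalone check of that idiom, not part of the patch:

/* Standalone sketch, not part of the patch: check the __builtin_ffs() page-shift idiom. */
#include <assert.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int size = getpagesize();
	int shift = __builtin_ffs(size) - 1;	/* index of the lowest set bit */

	/* Page sizes are powers of two, so that bit index is the page shift. */
	assert(size == (1 << shift));
	printf("host page size %d -> page shift %d\n", size, shift);
	return 0;
}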