author     Linus Torvalds <torvalds@linux-foundation.org>    2021-11-18 12:05:22 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2021-11-18 12:05:22 -0800
commit     c46e8ece9613b18d9554e2382a228b6e1795288d (patch)
tree       e5c999b1b426433057dbe348cc21e841434f9482 /tools/testing/selftests/kvm/lib/kvm_util.c
parent     4ae275bc6d2fb2b399a8723fc0a0be0929e10e0d (diff)
parent     2845e7353bc334d43309f5ea6d376c8fdbc94c93 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "Selftest changes:

   - Cleanups for the perf test infrastructure and mapping hugepages

   - Avoid contention on mmap_sem when the guests start to run

   - Add event channel upcall support to xen_shinfo_test

  x86 changes:

   - Fixes for Xen emulation

   - Kill kvm_map_gfn() / kvm_unmap_gfn() and broken gfn_to_pfn_cache

   - Fixes for migration of 32-bit nested guests on 64-bit hypervisor

   - Compilation fixes

   - More SEV cleanups

  Generic:

   - Cap the return value of KVM_CAP_NR_VCPUS to both KVM_CAP_MAX_VCPUS
     and num_online_cpus(). Most architectures were only using one of
     the two"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (42 commits)
  KVM: x86: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: s390: Cap KVM_CAP_NR_VCPUS by num_online_cpus()
  KVM: RISC-V: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: PPC: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: MIPS: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
  KVM: arm64: Cap KVM_CAP_NR_VCPUS by kvm_arm_default_max_vcpus()
  KVM: x86: Assume a 64-bit hypercall for guests with protected state
  selftests: KVM: Add /x86_64/sev_migrate_tests to .gitignore
  riscv: kvm: fix non-kernel-doc comment block
  KVM: SEV: Fix typo in and tweak name of cmd_allowed_from_miror()
  KVM: SEV: Drop a redundant setting of sev->asid during initialization
  KVM: SEV: WARN if SEV-ES is marked active but SEV is not
  KVM: SEV: Set sev_info.active after initial checks in sev_guest_init()
  KVM: SEV: Disallow COPY_ENC_CONTEXT_FROM if target has created vCPUs
  KVM: Kill kvm_map_gfn() / kvm_unmap_gfn() and gfn_to_pfn_cache
  KVM: nVMX: Use a gfn_to_hva_cache for vmptrld
  KVM: nVMX: Use kvm_read_guest_offset_cached() for nested VMCS check
  KVM: x86/xen: Use sizeof_field() instead of open-coding it
  KVM: nVMX: Use kvm_{read,write}_guest_cached() for shadow_vmcs12
  KVM: x86/xen: Fix get_attr of KVM_XEN_ATTR_TYPE_SHARED_INFO
  ...
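As a userspace-visible illustration of the generic change above (capping the value reported for KVM_CAP_NR_VCPUS), the snippet below queries both capabilities through the standard KVM_CHECK_EXTENSION ioctl. This is only a sketch of how the recommendation is consumed, not part of the patch; after this series the KVM_CAP_NR_VCPUS value is already capped by both KVM_CAP_MAX_VCPUS and num_online_cpus() inside the kernel.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: query the recommended and maximum vCPU counts from /dev/kvm.
 * KVM_CAP_NR_VCPUS is the recommendation; KVM_CAP_MAX_VCPUS is the hard cap.
 */
int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int nr_vcpus = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	int max_vcpus = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);

	printf("recommended vCPUs: %d, hard limit: %d\n", nr_vcpus, max_vcpus);
	return 0;
}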
Diffstat (limited to 'tools/testing/selftests/kvm/lib/kvm_util.c')
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c  44
1 file changed, 24 insertions, 20 deletions
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 14bb4d5b6bb7..8f2e0bb1ef96 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -22,15 +22,6 @@
 static int vcpu_mmap_sz(void);
-/* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static void *align(void *x, size_t size)
-{
-	size_t mask = size - 1;
-	TEST_ASSERT(size != 0 && !(size & (size - 1)),
-		    "size not a power of 2: %lu", size);
-	return (void *) (((size_t) x + mask) & ~mask);
-}
-
 int open_path_or_exit(const char *path, int flags)
 {
 	int fd;
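The local align() helper removed above is replaced at its call sites later in this diff by align_up() and align_ptr_up(), which come from a shared selftests header rather than being re-implemented here. A rough sketch of that kind of helper follows, assuming a power-of-two size; the real definitions live elsewhere in the selftests tree and may differ, so the names below are suffixed to mark them as stand-ins.

#include <stddef.h>
#include <stdint.h>

/*
 * Stand-ins for the shared helpers the diff switches to.
 * 'size' must be a power of two.
 */
static inline uint64_t align_up_sketch(uint64_t x, uint64_t size)
{
	uint64_t mask = size - 1;

	return (x + mask) & ~mask;
}

static inline void *align_ptr_up_sketch(void *x, size_t size)
{
	return (void *)(uintptr_t)align_up_sketch((uintptr_t)x, size);
}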
@@ -191,15 +182,15 @@ const char *vm_guest_mode_string(uint32_t i)
 }
 const struct vm_guest_mode_params vm_guest_mode_params[] = {
-	{ 52, 48, 0x1000, 12 },
-	{ 52, 48, 0x10000, 16 },
-	{ 48, 48, 0x1000, 12 },
-	{ 48, 48, 0x10000, 16 },
-	{ 40, 48, 0x1000, 12 },
-	{ 40, 48, 0x10000, 16 },
-	{ 0, 0, 0x1000, 12 },
-	{ 47, 64, 0x1000, 12 },
-	{ 44, 64, 0x1000, 12 },
+	[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
+	[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
+	[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
+	[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
+	[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
+	[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
+	[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
+	[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
+	[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
 	       "Missing new mode params?");
@@ -879,9 +870,17 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	alignment = 1;
 #endif
+	/*
+	 * When using THP, mmap is not guaranteed to return a hugepage aligned
+	 * address, so we have to pad the mmap. Padding is not needed for HugeTLB
+	 * because mmap will always return an address aligned to the HugeTLB
+	 * page size.
+	 */
 	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
 		alignment = max(backing_src_pagesz, alignment);
+	ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+
 	/* Add enough memory to align up if necessary */
 	if (alignment > 1)
 		region->mmap_size += alignment;
@@ -914,8 +913,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"test_malloc failed, mmap_start: %p errno: %i",
region->mmap_start, errno);
+ TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
+ region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
+ "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
+ region->mmap_start, backing_src_pagesz);
+
/* Align host address */
- region->host_mem = align(region->mmap_start, alignment);
+ region->host_mem = align_ptr_up(region->mmap_start, alignment);
/* As needed perform madvise */
if ((src_type == VM_MEM_SRC_ANONYMOUS ||
@@ -958,7 +962,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
"mmap of alias failed, errno: %i", errno);
/* Align host alias address */
- region->host_alias = align(region->mmap_alias, alignment);
+ region->host_alias = align_ptr_up(region->mmap_alias, alignment);
}
}
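Finally, the TEST_ASSERT added for HugeTLB backing two hunks up verifies that mmap really did return an address aligned to the HugeTLB page size, which is why no padding is needed in that case. The comparison against align_ptr_up() boils down to a power-of-two alignment check; an equivalent standalone form, with an illustrative helper name:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * A pointer is aligned to 'pagesz' (a power of two) exactly when its low
 * bits below 'pagesz' are zero, i.e. rounding it up would be a no-op.
 */
static bool ptr_is_aligned(const void *p, size_t pagesz)
{
	return ((uintptr_t)p & (pagesz - 1)) == 0;
}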