| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-18 12:05:22 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-18 12:05:22 -0800 |
| commit | c46e8ece9613b18d9554e2382a228b6e1795288d | |
| tree | e5c999b1b426433057dbe348cc21e841434f9482 /tools/testing/selftests/kvm/demand_paging_test.c | |
| parent | 4ae275bc6d2fb2b399a8723fc0a0be0929e10e0d | |
| parent | 2845e7353bc334d43309f5ea6d376c8fdbc94c93 | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
"Selftest changes:
- Cleanups for the perf test infrastructure and mapping hugepages
- Avoid contention on mmap_sem when the guests start to run
- Add event channel upcall support to xen_shinfo_test
x86 changes:
- Fixes for Xen emulation
- Kill kvm_map_gfn() / kvm_unmap_gfn() and broken gfn_to_pfn_cache
- Fixes for migration of 32-bit nested guests on 64-bit hypervisor
- Compilation fixes
- More SEV cleanups
Generic:
- Cap the return value of KVM_CAP_NR_VCPUS to both KVM_CAP_MAX_VCPUS
and num_online_cpus(). Most architectures were only using one of
the two"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (42 commits)
KVM: x86: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
KVM: s390: Cap KVM_CAP_NR_VCPUS by num_online_cpus()
KVM: RISC-V: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
KVM: PPC: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
KVM: MIPS: Cap KVM_CAP_NR_VCPUS by KVM_CAP_MAX_VCPUS
KVM: arm64: Cap KVM_CAP_NR_VCPUS by kvm_arm_default_max_vcpus()
KVM: x86: Assume a 64-bit hypercall for guests with protected state
selftests: KVM: Add /x86_64/sev_migrate_tests to .gitignore
riscv: kvm: fix non-kernel-doc comment block
KVM: SEV: Fix typo in and tweak name of cmd_allowed_from_miror()
KVM: SEV: Drop a redundant setting of sev->asid during initialization
KVM: SEV: WARN if SEV-ES is marked active but SEV is not
KVM: SEV: Set sev_info.active after initial checks in sev_guest_init()
KVM: SEV: Disallow COPY_ENC_CONTEXT_FROM if target has created vCPUs
KVM: Kill kvm_map_gfn() / kvm_unmap_gfn() and gfn_to_pfn_cache
KVM: nVMX: Use a gfn_to_hva_cache for vmptrld
KVM: nVMX: Use kvm_read_guest_offset_cached() for nested VMCS check
KVM: x86/xen: Use sizeof_field() instead of open-coding it
KVM: nVMX: Use kvm_{read,write}_guest_cached() for shadow_vmcs12
KVM: x86/xen: Fix get_attr of KVM_XEN_ATTR_TYPE_SHARED_INFO
...
Diffstat (limited to 'tools/testing/selftests/kvm/demand_paging_test.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | tools/testing/selftests/kvm/demand_paging_test.c | 56 |

1 file changed, 9 insertions(+), 47 deletions(-)
```diff
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 1510b21e6306..6a719d065599 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -42,10 +42,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static size_t demand_paging_size;
 static char *guest_data_prototype;
 
-static void *vcpu_worker(void *data)
+static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
         int ret;
-        struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
         int vcpu_id = vcpu_args->vcpu_id;
         struct kvm_vm *vm = perf_test_args.vm;
         struct kvm_run *run;
@@ -68,8 +67,6 @@ static void *vcpu_worker(void *data)
         ts_diff = timespec_elapsed(start);
         PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
                        ts_diff.tv_sec, ts_diff.tv_nsec);
-
-        return NULL;
 }
 
 static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
@@ -282,7 +279,6 @@ struct test_params {
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
         struct test_params *p = arg;
-        pthread_t *vcpu_threads;
         pthread_t *uffd_handler_threads = NULL;
         struct uffd_handler_args *uffd_args = NULL;
         struct timespec start;
@@ -293,9 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
         int r;
 
         vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
-                                 p->src_type);
-
-        perf_test_args.wr_fract = 1;
+                                 p->src_type, p->partition_vcpu_memory_access);
 
         demand_paging_size = get_backing_src_pagesz(p->src_type);
 
@@ -304,12 +298,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                     "Failed to allocate buffer for guest data pattern");
         memset(guest_data_prototype, 0xAB, demand_paging_size);
 
-        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
-        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
-
-        perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-                              p->partition_vcpu_memory_access);
-
         if (p->uffd_mode) {
                 uffd_handler_threads =
                         malloc(nr_vcpus * sizeof(*uffd_handler_threads));
@@ -322,26 +310,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                 TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
                 for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-                        vm_paddr_t vcpu_gpa;
+                        struct perf_test_vcpu_args *vcpu_args;
                         void *vcpu_hva;
                         void *vcpu_alias;
-                        uint64_t vcpu_mem_size;
-
-                        if (p->partition_vcpu_memory_access) {
-                                vcpu_gpa = guest_test_phys_mem +
-                                           (vcpu_id * guest_percpu_mem_size);
-                                vcpu_mem_size = guest_percpu_mem_size;
-                        } else {
-                                vcpu_gpa = guest_test_phys_mem;
-                                vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
-                        }
-                        PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                                       vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
+
+                        vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
 
                         /* Cache the host addresses of the region */
-                        vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
-                        vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
+                        vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
+                        vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
 
                         /*
                          * Set up user fault fd to handle demand paging
@@ -355,32 +332,18 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                                             pipefds[vcpu_id * 2], p->uffd_mode,
                                             p->uffd_delay, &uffd_args[vcpu_id],
                                             vcpu_hva, vcpu_alias,
-                                            vcpu_mem_size);
+                                            vcpu_args->pages * perf_test_args.guest_page_size);
                 }
         }
 
-        /* Export the shared variables to the guest */
-        sync_global_to_guest(vm, perf_test_args);
-
         pr_info("Finished creating vCPUs and starting uffd threads\n");
 
         clock_gettime(CLOCK_MONOTONIC, &start);
-
-        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-                pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
-                               &perf_test_args.vcpu_args[vcpu_id]);
-        }
-
+        perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
         pr_info("Started all vCPUs\n");
 
-        /* Wait for the vcpu threads to quit */
-        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-                pthread_join(vcpu_threads[vcpu_id], NULL);
-                PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
-        }
-
+        perf_test_join_vcpu_threads(nr_vcpus);
         ts_diff = timespec_elapsed(start);
-
         pr_info("All vCPU threads joined\n");
 
         if (p->uffd_mode) {
@@ -404,7 +367,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
         perf_test_destroy_vm(vm);
 
         free(guest_data_prototype);
-        free(vcpu_threads);
         if (p->uffd_mode) {
                 free(uffd_handler_threads);
                 free(uffd_args);
```
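The pthread bookkeeping removed above now lives behind perf_test_start_vcpu_threads() and perf_test_join_vcpu_threads() in the shared perf_test_util code; the test only supplies a vcpu_worker() callback that takes a struct perf_test_vcpu_args *. The sketch below is a stand-alone approximation of that start/join pattern under those assumptions, not the actual library implementation; names such as start_vcpu_threads() and struct vcpu_args are illustrative.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct perf_test_vcpu_args; only the field this sketch needs. */
struct vcpu_args {
        int vcpu_id;
};

static pthread_t *threads;
static struct vcpu_args *args;
static void (*vcpu_fn)(struct vcpu_args *);

/* Trampoline: adapts pthread's void *(*)(void *) to the simpler per-vCPU callback. */
static void *thread_main(void *data)
{
        vcpu_fn(data);
        return NULL;
}

/* Analogue of perf_test_start_vcpu_threads(): spawn one worker thread per vCPU. */
static void start_vcpu_threads(int nr_vcpus, void (*fn)(struct vcpu_args *))
{
        vcpu_fn = fn;
        threads = calloc(nr_vcpus, sizeof(*threads));
        args = calloc(nr_vcpus, sizeof(*args));

        for (int i = 0; i < nr_vcpus; i++) {
                args[i].vcpu_id = i;
                pthread_create(&threads[i], NULL, thread_main, &args[i]);
        }
}

/* Analogue of perf_test_join_vcpu_threads(): wait for every worker to finish. */
static void join_vcpu_threads(int nr_vcpus)
{
        for (int i = 0; i < nr_vcpus; i++)
                pthread_join(threads[i], NULL);
        free(threads);
        free(args);
}

/* Per-test worker, matching the new vcpu_worker() signature in the diff. */
static void vcpu_worker(struct vcpu_args *vcpu_args)
{
        printf("vCPU %d ran\n", vcpu_args->vcpu_id);
}

int main(void)
{
        start_vcpu_threads(4, vcpu_worker);
        join_vcpu_threads(4);
        return 0;
}
```

Centralizing the thread lifecycle this way is what lets the test drop its own vcpu_threads array, the explicit pthread_create()/pthread_join() loops, and the matching free() in cleanup.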
