-rw-r--r--   arch/powerpc/include/asm/kvm_host.h   | 12
-rw-r--r--   arch/powerpc/kvm/Kconfig              |  9
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c          | 23
-rw-r--r--   arch/powerpc/kvm/book3s_hv_p9_entry.c | 14
4 files changed, 34 insertions, 24 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index a0f44762a069..eeba679802cf 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -831,11 +831,13 @@ struct kvm_vcpu_arch {
 	struct kvmhv_tb_accumulator *cur_activity;	/* What we're timing */
 	u64	cur_tb_start;			/* when it started */
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
-	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
-	struct kvmhv_tb_accumulator rm_exit;	/* real-mode exit code */
-	struct kvmhv_tb_accumulator guest_time;	/* guest execution */
-	struct kvmhv_tb_accumulator cede_time;	/* time napping inside guest */
+	struct kvmhv_tb_accumulator vcpu_entry;
+	struct kvmhv_tb_accumulator vcpu_exit;
+	struct kvmhv_tb_accumulator in_guest;
+	struct kvmhv_tb_accumulator hcall;
+	struct kvmhv_tb_accumulator pg_fault;
+	struct kvmhv_tb_accumulator guest_entry;
+	struct kvmhv_tb_accumulator guest_exit;
 #else
 	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
 	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 191347f44731..cedf1e0f50e1 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -135,10 +135,11 @@ config KVM_BOOK3S_HV_P9_TIMING
 	select KVM_BOOK3S_HV_EXIT_TIMING
 	depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
 	help
-	  Calculate time taken for each vcpu in various parts of the
-	  code. The total, minimum and maximum times in nanoseconds
-	  together with the number of executions are reported in debugfs in
-	  kvm/vm#/vcpu#/timings.
+	  Calculate time taken for each vcpu during vcpu entry and
+	  exit, time spent inside the guest and time spent handling
+	  hypercalls and page faults. The total, minimum and maximum
+	  times in nanoseconds together with the number of executions
+	  are reported in debugfs in kvm/vm#/vcpu#/timings.
 
 	  If unsure, say N.
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108ee563f72a..c68883170a82 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2661,11 +2661,13 @@ static struct debugfs_timings_element {
 	size_t offset;
 } timings[] = {
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
-	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
-	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
-	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
-	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
+	{"vcpu_entry",	offsetof(struct kvm_vcpu, arch.vcpu_entry)},
+	{"guest_entry",	offsetof(struct kvm_vcpu, arch.guest_entry)},
+	{"in_guest",	offsetof(struct kvm_vcpu, arch.in_guest)},
+	{"guest_exit",	offsetof(struct kvm_vcpu, arch.guest_exit)},
+	{"vcpu_exit",	offsetof(struct kvm_vcpu, arch.vcpu_exit)},
+	{"hypercall",	offsetof(struct kvm_vcpu, arch.hcall)},
+	{"page_fault",	offsetof(struct kvm_vcpu, arch.pg_fault)},
 #else
 	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
 	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
@@ -4014,8 +4016,10 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	accumulate_time(vcpu, &vcpu->arch.in_guest);
 	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
 				  __pa(&vcpu->arch.regs));
+	accumulate_time(vcpu, &vcpu->arch.guest_exit);
 	kvmhv_restore_hv_return_state(vcpu, &hvregs);
 	switch_pmu_to_host(vcpu, &host_os_sprs);
 	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
@@ -4703,6 +4707,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	struct kvm *kvm;
 	unsigned long msr;
 
+	start_timing(vcpu, &vcpu->arch.vcpu_entry);
+
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -4768,6 +4774,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
+		accumulate_time(vcpu, &vcpu->arch.guest_entry);
 		if (cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
@@ -4775,6 +4782,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			r = kvmppc_run_vcpu(vcpu);
 
 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+			accumulate_time(vcpu, &vcpu->arch.hcall);
+
 			if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
 				/*
 				 * These should have been caught reflected
@@ -4790,6 +4799,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
+			accumulate_time(vcpu, &vcpu->arch.pg_fault);
 			srcu_idx = srcu_read_lock(&kvm->srcu);
 			r = kvmppc_book3s_hv_page_fault(vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -4801,12 +4811,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 				r = kvmppc_xics_rm_complete(vcpu, 0);
 		}
 	} while (is_kvmppc_resume_guest(r));
+	accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
 
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&kvm->arch.vcpus_running);
 
 	srr_regs_clobbered();
 
+	end_timing(vcpu);
+
 	return r;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 32e078db8da2..e740eca45862 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -779,8 +779,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
 	WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
 
-	start_timing(vcpu, &vcpu->arch.rm_entry);
-
 	vcpu->arch.ceded = 0;
 
 	/* Save MSR for restore, with EE clear. */
@@ -941,13 +939,13 @@ tm_return_to_guest:
 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
 	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
 
-	accumulate_time(vcpu, &vcpu->arch.guest_time);
-
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	accumulate_time(vcpu, &vcpu->arch.in_guest);
+
 	kvmppc_p9_enter_guest(vcpu);
 
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-	accumulate_time(vcpu, &vcpu->arch.rm_intr);
+	accumulate_time(vcpu, &vcpu->arch.guest_exit);
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 
 	/* XXX: Could get these from r11/12 and paca exsave instead */
 	vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
@@ -1042,8 +1040,6 @@ tm_return_to_guest:
 #endif
 	}
 
-	accumulate_time(vcpu, &vcpu->arch.rm_exit);
-
 	/* Advance host PURR/SPURR by the amount used by guest */
 	purr = mfspr(SPRN_PURR);
 	spurr = mfspr(SPRN_SPURR);
@@ -1150,8 +1146,6 @@ tm_return_to_guest:
 		asm volatile(PPC_CP_ABORT);
 
 out:
-	end_timing(vcpu);
-
 	return trap;
 }
 EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
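The counters reported in kvm/vm#/vcpu#/timings follow a simple pattern that the hunks above rely on: each accumulate_time() call closes the interval charged to the previously active accumulator and opens the next one, so the vcpu run loop is carved into vcpu_entry, guest_entry, in_guest, guest_exit, vcpu_exit (plus hypercall/page_fault) slices. The following is a minimal userspace sketch of that pattern only; the real helpers operate on struct kvmhv_tb_accumulator and the CPU timebase, while struct acc and now_ns() here are invented for illustration.

/*
 * Illustrative model of the start_timing/accumulate_time/end_timing
 * accounting used by the patch above.  Not kernel code: "struct acc"
 * and now_ns() are stand-ins for struct kvmhv_tb_accumulator and the
 * timebase reads.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct acc {
	const char *name;
	uint64_t count, total, min, max;	/* nanoseconds */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static struct acc *cur;		/* interval currently being timed */
static uint64_t cur_start;

/* Close the interval charged to "cur", then start timing "next". */
static void accumulate_time(struct acc *next)
{
	uint64_t now = now_ns();

	if (cur) {
		uint64_t delta = now - cur_start;

		cur->total += delta;
		if (!cur->count || delta < cur->min)
			cur->min = delta;
		if (delta > cur->max)
			cur->max = delta;
		cur->count++;
	}
	cur = next;
	cur_start = now;
}

int main(void)
{
	struct acc vcpu_entry = { "vcpu_entry" }, guest_entry = { "guest_entry" },
		   in_guest = { "in_guest" }, guest_exit = { "guest_exit" },
		   vcpu_exit = { "vcpu_exit" };
	struct acc *all[] = { &vcpu_entry, &guest_entry, &in_guest,
			      &guest_exit, &vcpu_exit };

	accumulate_time(&vcpu_entry);		/* start_timing() equivalent */
	for (int i = 0; i < 3; i++) {		/* pretend run loop, 3 guest entries */
		accumulate_time(&guest_entry);
		accumulate_time(&in_guest);
		accumulate_time(&guest_exit);
	}
	accumulate_time(&vcpu_exit);
	accumulate_time(NULL);			/* end_timing() equivalent */

	for (unsigned int i = 0; i < sizeof(all) / sizeof(all[0]); i++)
		printf("%-12s count=%llu total=%lluns min=%lluns max=%lluns\n",
		       all[i]->name, (unsigned long long)all[i]->count,
		       (unsigned long long)all[i]->total,
		       (unsigned long long)all[i]->min,
		       (unsigned long long)all[i]->max);
	return 0;
}

Run standalone, this prints one line per counter in roughly the shape the debugfs timings file describes (count, total, min, max); in the kernel the same bookkeeping happens per vcpu around the hunks shown above.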
