[Devel,RHEL7,COMMIT] KVM: x86: remaster kvm_write_tsc code

Submitted by Konstantin Khorenko on March 24, 2017, 11:48 a.m.

Details

Message ID: 201703241148.v2OBmJmi018178@finist_cl7.x64_64.work.ct
State: New
Series: "KVM: x86: remaster kvm_write_tsc code"

Commit Message

Konstantin Khorenko March 24, 2017, 11:48 a.m.
The commit is pushed to "branch-rh7-3.10.0-514.10.2.vz7.29.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.10.2.vz7.29.6
------>
commit 3fee40cdf1c893699a16115eec530139ec638f95
Author: Denis Plotnikov <dplotnikov@virtuozzo.com>
Date:   Fri Mar 24 15:48:19 2017 +0400

    KVM: x86: remaster kvm_write_tsc code
    
    Reuse existing code instead of inline asm and make the
    TSC synchronization logic more concise and clear.
    
    Code refactoring prior to fixing
    https://jira.sw.ru/browse/PSBM-59784
    
    Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
    Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
---
 arch/x86/kvm/x86.c | 51 ++++++++++++---------------------------------------
 1 file changed, 12 insertions(+), 39 deletions(-)
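
Before the diff, a minimal userspace sketch of what the removed code computed may help. The old check converted the guest TSC delta to microseconds and compared it against one second; on 32-bit x86 the signed 64-bit division had to go through idivl, which faults when the quotient overflows 32 bits, and the .fixup handler mapped that fault to usdiff = USEC_PER_SEC. The function old_usdiff() and the plain C division below are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000LL

/*
 * Stand-in for the removed check: the TSC delta, converted to
 * microseconds, minus the elapsed wall-clock time in microseconds.
 * The caller compared the absolute result against USEC_PER_SEC.
 */
static int64_t old_usdiff(int64_t data, int64_t last_tsc_write,
			  uint32_t virtual_tsc_khz, uint64_t elapsed_ns)
{
	/* On 32-bit x86 this division is what the idivl asm performed. */
	int64_t usdiff = (data - last_tsc_write) * 1000 / virtual_tsc_khz;

	usdiff -= (int64_t)(elapsed_ns / 1000);
	return usdiff < 0 ? -usdiff : usdiff;
}

int main(void)
{
	/* 2500000 kHz TSC, 10 ms elapsed, guest writes 25e6 cycles ahead. */
	printf("%lld\n", (long long)old_usdiff(25000000, 0, 2500000, 10000000));
	return 0;
}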

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 407ceb2..df4ac8d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1413,10 +1413,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 usdiff;
 	bool matched;
 	bool already_matched;
 	u64 data = msr->data;
+	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1424,51 +1424,24 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
-		int faulted = 0;
-
-		/* n.b - signed multiplication and division required */
-		usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
-		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
-		/* do_div() only does unsigned */
-		asm("1: idivl %[divisor]\n"
-		    "2: xor %%edx, %%edx\n"
-		    "   movl $0, %[faulted]\n"
-		    "3:\n"
-		    ".section .fixup,\"ax\"\n"
-		    "4: movl $1, %[faulted]\n"
-		    "   jmp  3b\n"
-		    ".previous\n"
-
-		_ASM_EXTABLE(1b, 4b)
-
-		: "=A"(usdiff), [faulted] "=r" (faulted)
-		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
-		do_div(elapsed, 1000);
-		usdiff -= elapsed;
-		if (usdiff < 0)
-			usdiff = -usdiff;
-
-		/* idivl overflow => difference is larger than USEC_PER_SEC */
-		if (faulted)
-			usdiff = USEC_PER_SEC;
-	} else
-		usdiff = USEC_PER_SEC; /* disable TSC match window below */
+		u64 tsc_exp = kvm->arch.last_tsc_write +
+					nsec_to_cycles(vcpu, elapsed);
+		u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+		/*
+		 * Special case: TSC write with a small delta (1 second) of virtual
+		 * cycle time against real time is interpreted as an attempt to
+		 * synchronize the CPU.
+	         */
+		synchronizing = data < tsc_exp + tsc_hz && data > tsc_exp - tsc_hz;
+	}
 
 	/*
-	 * Special case: TSC write with a small delta (1 second) of virtual
-	 * cycle time against real time is interpreted as an attempt to
-	 * synchronize the CPU.
-         *
 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
 	 * TSC, we add elapsed time in this computation.  We could let the
 	 * compensation code attempt to catch up if we fall behind, but
 	 * it's better to try to match offsets from the beginning.
          */
-	if (usdiff < USEC_PER_SEC &&
+	if (synchronizing &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;
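
For comparison, here is a minimal runnable sketch of the new check in isolation, under simplifying assumptions: last_tsc_write, elapsed_ns and virtual_tsc_khz are plain parameters, and nsec_to_cycles() is reduced to integer arithmetic (the kernel version scales through the vcpu's pvclock parameters). This is userspace illustration code, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cycles elapsing in 'ns' nanoseconds at 'tsc_khz' kHz (simplified). */
static uint64_t nsec_to_cycles(uint64_t ns, uint64_t tsc_khz)
{
	return ns * tsc_khz / 1000000ULL;
}

/*
 * A TSC write is treated as a synchronization attempt when it lands
 * within one second of virtual cycle time of the expected TSC value,
 * mirroring the comparison added by the patch.
 */
static bool synchronizing(uint64_t data, uint64_t last_tsc_write,
			  uint64_t elapsed_ns, uint64_t virtual_tsc_khz)
{
	uint64_t tsc_exp = last_tsc_write +
			   nsec_to_cycles(elapsed_ns, virtual_tsc_khz);
	uint64_t tsc_hz  = virtual_tsc_khz * 1000ULL; /* one second of cycles */

	return data < tsc_exp + tsc_hz && data > tsc_exp - tsc_hz;
}

int main(void)
{
	uint64_t khz = 2500000ULL;		/* 2.5 GHz virtual TSC */
	uint64_t last = 1000000000ULL;		/* previous guest TSC write */
	uint64_t elapsed_ns = 10000000ULL;	/* 10 ms since that write */

	/* Write close to the expected value: synchronizing (prints 1). */
	printf("%d\n", synchronizing(last + 25000000ULL, last, elapsed_ns, khz));
	/* Write four seconds of cycles ahead: not synchronizing (prints 0). */
	printf("%d\n", synchronizing(last + 10000000000ULL, last, elapsed_ns, khz));
	return 0;
}

Note how the division from the old code disappears: the one-second window is expressed in cycles rather than microseconds, so no 64-bit-by-32-bit division, and hence no idivl fixup on 32-bit hosts, is needed.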