Skip to content

Commit 54aa83c

Browse files
committed
KVM: x86: do not set st->preempted when going back to user space
Similar to the Xen path, only change the vCPU's reported state if the vCPU
was actually preempted. The reason for KVM's behavior is that for example
optimistic spinning might not be a good idea if the guest is doing repeated
exits to userspace; however, it is confusing and unlikely to make a
difference, because well-tuned guests will hardly ever exit KVM_RUN in
the first place.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 11d39e8 commit 54aa83c

File tree

2 files changed

+18
-14
lines changed

2 files changed

+18
-14
lines changed

arch/x86/kvm/x86.c

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -4654,19 +4654,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
-	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
-		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+	if (vcpu->preempted) {
+		if (!vcpu->arch.guest_state_protected)
+			vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
-	/*
-	 * Take the srcu lock as memslots will be accessed to check the gfn
-	 * cache generation against the memslots generation.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (kvm_xen_msr_enabled(vcpu->kvm))
-		kvm_xen_runstate_set_preempted(vcpu);
-	else
-		kvm_steal_time_set_preempted(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		/*
+		 * Take the srcu lock as memslots will be accessed to check the gfn
+		 * cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		if (kvm_xen_msr_enabled(vcpu->kvm))
+			kvm_xen_runstate_set_preempted(vcpu);
+		else
+			kvm_steal_time_set_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();

arch/x86/kvm/xen.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
 	 * behalf of the vCPU. Only if the VMM does actually block
 	 * does it need to enter RUNSTATE_blocked.
 	 */
-	if (vcpu->preempted)
-		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+	if (WARN_ON_ONCE(!vcpu->preempted))
+		return;
+
+	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
 }
 
 /* 32-bit compatibility definitions, also used natively in 32-bit build */

0 commit comments

Comments
 (0)