Skip to content

Commit b67a4cc

Browse files
pgonda authored and bonzini committed
KVM: SEV: Refactor out sev_es_state struct
Move SEV-ES vCPU metadata into new sev_es_state struct from vcpu_svm. Signed-off-by: Peter Gonda <[email protected]> Suggested-by: Tom Lendacky <[email protected]> Acked-by: Tom Lendacky <[email protected]> Reviewed-by: Sean Christopherson <[email protected]> Cc: Marc Orr <[email protected]> Cc: Paolo Bonzini <[email protected]> Cc: David Rientjes <[email protected]> Cc: Dr. David Alan Gilbert <[email protected]> Cc: Brijesh Singh <[email protected]> Cc: Tom Lendacky <[email protected]> Cc: Vitaly Kuznetsov <[email protected]> Cc: Wanpeng Li <[email protected]> Cc: Jim Mattson <[email protected]> Cc: Joerg Roedel <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: [email protected] Cc: [email protected] Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 52cf891 commit b67a4cc

File tree

3 files changed

+61
-56
lines changed

3 files changed

+61
-56
lines changed

arch/x86/kvm/svm/sev.c

Lines changed: 42 additions & 41 deletions
Original file line number | Diff line number | Diff line change
@@ -590,7 +590,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
590590
* traditional VMSA as it has been built so far (in prep
591591
* for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
592592
*/
593-
memcpy(svm->vmsa, save, sizeof(*save));
593+
memcpy(svm->sev_es.vmsa, save, sizeof(*save));
594594

595595
return 0;
596596
}
@@ -612,11 +612,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
612612
* the VMSA memory content (i.e it will write the same memory region
613613
* with the guest's key), so invalidate it first.
614614
*/
615-
clflush_cache_range(svm->vmsa, PAGE_SIZE);
615+
clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
616616

617617
vmsa.reserved = 0;
618618
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
619-
vmsa.address = __sme_pa(svm->vmsa);
619+
vmsa.address = __sme_pa(svm->sev_es.vmsa);
620620
vmsa.len = PAGE_SIZE;
621621
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
622622
}
@@ -2026,16 +2026,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
20262026
svm = to_svm(vcpu);
20272027

20282028
if (vcpu->arch.guest_state_protected)
2029-
sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
2030-
__free_page(virt_to_page(svm->vmsa));
2029+
sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
2030+
__free_page(virt_to_page(svm->sev_es.vmsa));
20312031

2032-
if (svm->ghcb_sa_free)
2033-
kfree(svm->ghcb_sa);
2032+
if (svm->sev_es.ghcb_sa_free)
2033+
kfree(svm->sev_es.ghcb_sa);
20342034
}
20352035

20362036
static void dump_ghcb(struct vcpu_svm *svm)
20372037
{
2038-
struct ghcb *ghcb = svm->ghcb;
2038+
struct ghcb *ghcb = svm->sev_es.ghcb;
20392039
unsigned int nbits;
20402040

20412041
/* Re-use the dump_invalid_vmcb module parameter */
@@ -2061,7 +2061,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
20612061
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
20622062
{
20632063
struct kvm_vcpu *vcpu = &svm->vcpu;
2064-
struct ghcb *ghcb = svm->ghcb;
2064+
struct ghcb *ghcb = svm->sev_es.ghcb;
20652065

20662066
/*
20672067
* The GHCB protocol so far allows for the following data
@@ -2081,7 +2081,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
20812081
{
20822082
struct vmcb_control_area *control = &svm->vmcb->control;
20832083
struct kvm_vcpu *vcpu = &svm->vcpu;
2084-
struct ghcb *ghcb = svm->ghcb;
2084+
struct ghcb *ghcb = svm->sev_es.ghcb;
20852085
u64 exit_code;
20862086

20872087
/*
@@ -2128,7 +2128,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
21282128
struct ghcb *ghcb;
21292129
u64 exit_code = 0;
21302130

2131-
ghcb = svm->ghcb;
2131+
ghcb = svm->sev_es.ghcb;
21322132

21332133
/* Only GHCB Usage code 0 is supported */
21342134
if (ghcb->ghcb_usage)
@@ -2246,33 +2246,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
22462246

22472247
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
22482248
{
2249-
if (!svm->ghcb)
2249+
if (!svm->sev_es.ghcb)
22502250
return;
22512251

2252-
if (svm->ghcb_sa_free) {
2252+
if (svm->sev_es.ghcb_sa_free) {
22532253
/*
22542254
* The scratch area lives outside the GHCB, so there is a
22552255
* buffer that, depending on the operation performed, may
22562256
* need to be synced, then freed.
22572257
*/
2258-
if (svm->ghcb_sa_sync) {
2258+
if (svm->sev_es.ghcb_sa_sync) {
22592259
kvm_write_guest(svm->vcpu.kvm,
2260-
ghcb_get_sw_scratch(svm->ghcb),
2261-
svm->ghcb_sa, svm->ghcb_sa_len);
2262-
svm->ghcb_sa_sync = false;
2260+
ghcb_get_sw_scratch(svm->sev_es.ghcb),
2261+
svm->sev_es.ghcb_sa,
2262+
svm->sev_es.ghcb_sa_len);
2263+
svm->sev_es.ghcb_sa_sync = false;
22632264
}
22642265

2265-
kfree(svm->ghcb_sa);
2266-
svm->ghcb_sa = NULL;
2267-
svm->ghcb_sa_free = false;
2266+
kfree(svm->sev_es.ghcb_sa);
2267+
svm->sev_es.ghcb_sa = NULL;
2268+
svm->sev_es.ghcb_sa_free = false;
22682269
}
22692270

2270-
trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2271+
trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
22712272

22722273
sev_es_sync_to_ghcb(svm);
22732274

2274-
kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2275-
svm->ghcb = NULL;
2275+
kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2276+
svm->sev_es.ghcb = NULL;
22762277
}
22772278

22782279
void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2302,7 +2303,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
23022303
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
23032304
{
23042305
struct vmcb_control_area *control = &svm->vmcb->control;
2305-
struct ghcb *ghcb = svm->ghcb;
2306+
struct ghcb *ghcb = svm->sev_es.ghcb;
23062307
u64 ghcb_scratch_beg, ghcb_scratch_end;
23072308
u64 scratch_gpa_beg, scratch_gpa_end;
23082309
void *scratch_va;
@@ -2338,7 +2339,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
23382339
return false;
23392340
}
23402341

2341-
scratch_va = (void *)svm->ghcb;
2342+
scratch_va = (void *)svm->sev_es.ghcb;
23422343
scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
23432344
} else {
23442345
/*
@@ -2368,12 +2369,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
23682369
* the vCPU next time (i.e. a read was requested so the data
23692370
* must be written back to the guest memory).
23702371
*/
2371-
svm->ghcb_sa_sync = sync;
2372-
svm->ghcb_sa_free = true;
2372+
svm->sev_es.ghcb_sa_sync = sync;
2373+
svm->sev_es.ghcb_sa_free = true;
23732374
}
23742375

2375-
svm->ghcb_sa = scratch_va;
2376-
svm->ghcb_sa_len = len;
2376+
svm->sev_es.ghcb_sa = scratch_va;
2377+
svm->sev_es.ghcb_sa_len = len;
23772378

23782379
return true;
23792380
}
@@ -2492,15 +2493,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
24922493
return -EINVAL;
24932494
}
24942495

2495-
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
2496+
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
24962497
/* Unable to map GHCB from guest */
24972498
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
24982499
ghcb_gpa);
24992500
return -EINVAL;
25002501
}
25012502

2502-
svm->ghcb = svm->ghcb_map.hva;
2503-
ghcb = svm->ghcb_map.hva;
2503+
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2504+
ghcb = svm->sev_es.ghcb_map.hva;
25042505

25052506
trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
25062507

@@ -2523,7 +2524,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
25232524
ret = kvm_sev_es_mmio_read(vcpu,
25242525
control->exit_info_1,
25252526
control->exit_info_2,
2526-
svm->ghcb_sa);
2527+
svm->sev_es.ghcb_sa);
25272528
break;
25282529
case SVM_VMGEXIT_MMIO_WRITE:
25292530
if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2532,7 +2533,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
25322533
ret = kvm_sev_es_mmio_write(vcpu,
25332534
control->exit_info_1,
25342535
control->exit_info_2,
2535-
svm->ghcb_sa);
2536+
svm->sev_es.ghcb_sa);
25362537
break;
25372538
case SVM_VMGEXIT_NMI_COMPLETE:
25382539
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2582,8 +2583,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
25822583
if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
25832584
return -EINVAL;
25842585

2585-
return kvm_sev_es_string_io(&svm->vcpu, size, port,
2586-
svm->ghcb_sa, svm->ghcb_sa_len / size, in);
2586+
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
2587+
svm->sev_es.ghcb_sa_len / size, in);
25872588
}
25882589

25892590
void sev_es_init_vmcb(struct vcpu_svm *svm)
@@ -2598,7 +2599,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
25982599
* VMCB page. Do not include the encryption mask on the VMSA physical
25992600
* address since hardware will access it using the guest key.
26002601
*/
2601-
svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2602+
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
26022603

26032604
/* Can't intercept CR register access, HV can't modify CR registers */
26042605
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2670,8 +2671,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
26702671
struct vcpu_svm *svm = to_svm(vcpu);
26712672

26722673
/* First SIPI: Use the values as initially set by the VMM */
2673-
if (!svm->received_first_sipi) {
2674-
svm->received_first_sipi = true;
2674+
if (!svm->sev_es.received_first_sipi) {
2675+
svm->sev_es.received_first_sipi = true;
26752676
return;
26762677
}
26772678

@@ -2680,8 +2681,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
26802681
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
26812682
* non-zero value.
26822683
*/
2683-
if (!svm->ghcb)
2684+
if (!svm->sev_es.ghcb)
26842685
return;
26852686

2686-
ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2687+
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
26872688
}

arch/x86/kvm/svm/svm.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1450,7 +1450,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
14501450
svm_switch_vmcb(svm, &svm->vmcb01);
14511451

14521452
if (vmsa_page)
1453-
svm->vmsa = page_address(vmsa_page);
1453+
svm->sev_es.vmsa = page_address(vmsa_page);
14541454

14551455
svm->guest_state_loaded = false;
14561456

@@ -2833,11 +2833,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
28332833
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
28342834
{
28352835
struct vcpu_svm *svm = to_svm(vcpu);
2836-
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
2836+
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
28372837
return kvm_complete_insn_gp(vcpu, err);
28382838

2839-
ghcb_set_sw_exit_info_1(svm->ghcb, 1);
2840-
ghcb_set_sw_exit_info_2(svm->ghcb,
2839+
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2840+
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
28412841
X86_TRAP_GP |
28422842
SVM_EVTINJ_TYPE_EXEPT |
28432843
SVM_EVTINJ_VALID);

arch/x86/kvm/svm/svm.h

Lines changed: 15 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -123,6 +123,20 @@ struct svm_nested_state {
123123
bool initialized;
124124
};
125125

126+
struct vcpu_sev_es_state {
127+
/* SEV-ES support */
128+
struct vmcb_save_area *vmsa;
129+
struct ghcb *ghcb;
130+
struct kvm_host_map ghcb_map;
131+
bool received_first_sipi;
132+
133+
/* SEV-ES scratch area support */
134+
void *ghcb_sa;
135+
u64 ghcb_sa_len;
136+
bool ghcb_sa_sync;
137+
bool ghcb_sa_free;
138+
};
139+
126140
struct vcpu_svm {
127141
struct kvm_vcpu vcpu;
128142
/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
@@ -186,17 +200,7 @@ struct vcpu_svm {
186200
DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
187201
} shadow_msr_intercept;
188202

189-
/* SEV-ES support */
190-
struct vmcb_save_area *vmsa;
191-
struct ghcb *ghcb;
192-
struct kvm_host_map ghcb_map;
193-
bool received_first_sipi;
194-
195-
/* SEV-ES scratch area support */
196-
void *ghcb_sa;
197-
u64 ghcb_sa_len;
198-
bool ghcb_sa_sync;
199-
bool ghcb_sa_free;
203+
struct vcpu_sev_es_state sev_es;
200204

201205
bool guest_state_loaded;
202206
};

0 commit comments

Comments (0)