@@ -590,7 +590,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	 * traditional VMSA as it has been built so far (in prep
 	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
 	 */
-	memcpy(svm->vmsa, save, sizeof(*save));
+	memcpy(svm->sev_es.vmsa, save, sizeof(*save));
 
 	return 0;
 }
@@ -612,11 +612,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	 * the VMSA memory content (i.e it will write the same memory region
 	 * with the guest's key), so invalidate it first.
 	 */
-	clflush_cache_range(svm->vmsa, PAGE_SIZE);
+	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
 
 	vmsa.reserved = 0;
 	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
-	vmsa.address = __sme_pa(svm->vmsa);
+	vmsa.address = __sme_pa(svm->sev_es.vmsa);
 	vmsa.len = PAGE_SIZE;
 	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
 }
@@ -2026,16 +2026,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	svm = to_svm(vcpu);
 
 	if (vcpu->arch.guest_state_protected)
-		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
-	__free_page(virt_to_page(svm->vmsa));
+		sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+	__free_page(virt_to_page(svm->sev_es.vmsa));
 
-	if (svm->ghcb_sa_free)
-		kfree(svm->ghcb_sa);
+	if (svm->sev_es.ghcb_sa_free)
+		kfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
 {
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	unsigned int nbits;
 
 	/* Re-use the dump_invalid_vmcb module parameter */
@@ -2061,7 +2061,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 
 	/*
 	 * The GHCB protocol so far allows for the following data
@@ -2081,7 +2081,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 exit_code;
 
 	/*
@@ -2128,7 +2128,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	struct ghcb *ghcb;
 	u64 exit_code = 0;
 
-	ghcb = svm->ghcb;
+	ghcb = svm->sev_es.ghcb;
 
 	/* Only GHCB Usage code 0 is supported */
 	if (ghcb->ghcb_usage)
@@ -2246,33 +2246,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	if (svm->ghcb_sa_free) {
+	if (svm->sev_es.ghcb_sa_free) {
 		/*
 		 * The scratch area lives outside the GHCB, so there is a
 		 * buffer that, depending on the operation performed, may
 		 * need to be synced, then freed.
 		 */
-		if (svm->ghcb_sa_sync) {
+		if (svm->sev_es.ghcb_sa_sync) {
 			kvm_write_guest(svm->vcpu.kvm,
-					ghcb_get_sw_scratch(svm->ghcb),
-					svm->ghcb_sa, svm->ghcb_sa_len);
-			svm->ghcb_sa_sync = false;
+					ghcb_get_sw_scratch(svm->sev_es.ghcb),
+					svm->sev_es.ghcb_sa,
+					svm->sev_es.ghcb_sa_len);
+			svm->sev_es.ghcb_sa_sync = false;
 		}
 
-		kfree(svm->ghcb_sa);
-		svm->ghcb_sa = NULL;
-		svm->ghcb_sa_free = false;
+		kfree(svm->sev_es.ghcb_sa);
+		svm->sev_es.ghcb_sa = NULL;
+		svm->sev_es.ghcb_sa_free = false;
 	}
 
-	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
+	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
 
 	sev_es_sync_to_ghcb(svm);
 
-	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
-	svm->ghcb = NULL;
+	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
+	svm->sev_es.ghcb = NULL;
 }
 
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2302,7 +2303,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 ghcb_scratch_beg, ghcb_scratch_end;
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
@@ -2338,7 +2339,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 			return false;
 		}
 
-		scratch_va = (void *)svm->ghcb;
+		scratch_va = (void *)svm->sev_es.ghcb;
 		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
 	} else {
 		/*
@@ -2368,12 +2369,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		 * the vCPU next time (i.e. a read was requested so the data
 		 * must be written back to the guest memory).
 		 */
-		svm->ghcb_sa_sync = sync;
-		svm->ghcb_sa_free = true;
+		svm->sev_es.ghcb_sa_sync = sync;
+		svm->sev_es.ghcb_sa_free = true;
 	}
 
-	svm->ghcb_sa = scratch_va;
-	svm->ghcb_sa_len = len;
+	svm->sev_es.ghcb_sa = scratch_va;
+	svm->sev_es.ghcb_sa_len = len;
 
 	return true;
 }
@@ -2492,15 +2493,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
 		/* Unable to map GHCB from guest */
 		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
 			    ghcb_gpa);
 		return -EINVAL;
 	}
 
-	svm->ghcb = svm->ghcb_map.hva;
-	ghcb = svm->ghcb_map.hva;
+	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+	ghcb = svm->sev_es.ghcb_map.hva;
 
 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 
@@ -2523,7 +2524,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_read(vcpu,
 					   control->exit_info_1,
 					   control->exit_info_2,
-					   svm->ghcb_sa);
+					   svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_MMIO_WRITE:
 		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2532,7 +2533,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_write(vcpu,
 					    control->exit_info_1,
 					    control->exit_info_2,
-					    svm->ghcb_sa);
+					    svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2582,8 +2583,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
 		return -EINVAL;
 
-	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
+				    svm->sev_es.ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
void sev_es_init_vmcb (struct vcpu_svm * svm )
@@ -2598,7 +2599,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
2598
2599
* VMCB page. Do not include the encryption mask on the VMSA physical
2599
2600
* address since hardware will access it using the guest key.
2600
2601
*/
2601
- svm -> vmcb -> control .vmsa_pa = __pa (svm -> vmsa );
2602
+ svm -> vmcb -> control .vmsa_pa = __pa (svm -> sev_es . vmsa );
2602
2603
2603
2604
/* Can't intercept CR register access, HV can't modify CR registers */
2604
2605
svm_clr_intercept (svm , INTERCEPT_CR0_READ );
@@ -2670,8 +2671,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/* First SIPI: Use the values as initially set by the VMM */
-	if (!svm->received_first_sipi) {
-		svm->received_first_sipi = true;
+	if (!svm->sev_es.received_first_sipi) {
+		svm->sev_es.received_first_sipi = true;
 		return;
 	}
 
@@ -2680,8 +2681,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
 	 * non-zero value.
 	 */
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
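Note: the diff above only shows the sev.c side of the rename from svm->ghcb/vmsa/... to svm->sev_es.*; it presumes a companion change in svm.h that groups the SEV-ES fields of struct vcpu_svm into a dedicated sub-struct embedded as "sev_es". The sketch below is not part of this diff; the struct name and field types are inferred from the accesses shown here (vmsa copied from a VMCB save area, ghcb_map passed to kvm_vcpu_map, ghcb_sa_len assigned from a u64), so treat it as an assumption rather than the exact upstream definition.

/*
 * Sketch only: grouping inferred from the svm->sev_es accesses in this diff.
 * Name "vcpu_sev_es_state" is assumed; check svm.h in the actual series.
 */
struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;	/* encrypted-register save area, memcpy'd in sev_es_sync_vmsa() */
	struct ghcb *ghcb;		/* host mapping of the guest's GHCB page */
	struct kvm_host_map ghcb_map;	/* mapping handle used by kvm_vcpu_map()/kvm_vcpu_unmap() */
	bool received_first_sipi;	/* first SIPI uses VMM-provided state */

	/* SEV-ES scratch area support */
	void *ghcb_sa;			/* scratch buffer (inside GHCB or kmalloc'd copy) */
	u64 ghcb_sa_len;		/* scratch buffer length */
	bool ghcb_sa_sync;		/* write scratch back to guest memory on unmap */
	bool ghcb_sa_free;		/* scratch buffer was kmalloc'd and must be freed */
};

struct vcpu_svm would then embed this as "struct vcpu_sev_es_state sev_es;", which is what makes every svm->sev_es.<field> reference in the hunks above resolve.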