@@ -2874,6 +2874,17 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 	vmx_update_msr_bitmap(&vmx->vcpu);
 }
 
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (is_guest_mode(vcpu) &&
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+	return vcpu->arch.tsc_offset;
+}
+
 /*
  * reads and returns guest's timestamp counter "register"
  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
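
The new hook lets generic x86 code recover L1's TSC offset even while L2 is running. The x86.c consumer is not part of this excerpt; a minimal sketch of how it would plug in, assuming the existing kvm_x86_ops dispatch table and kvm_scale_tsc() helper:

/* Sketch only: the x86.c side is not in this diff.  kvm_scale_tsc()
 * and kvm_x86_ops are existing kernel interfaces; the exact body in
 * the tree may differ. */
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);

	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}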
@@ -11175,11 +11186,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 	}
 
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-		vmcs_write64(TSC_OFFSET,
-			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
-	else
-		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -11427,6 +11435,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u32 msr_entry_idx;
 	u32 exit_qual;
+	int r;
 
 	enter_guest_mode(vcpu);
 
@@ -11436,26 +11445,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 	vmx_segment_cache_clear(vmx);
 
-	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_INVALID_STATE, exit_qual);
-		return 1;
-	}
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+	r = EXIT_REASON_INVALID_STATE;
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
+		goto fail;
 
 	nested_get_vmcs12_pages(vcpu, vmcs12);
 
+	r = EXIT_REASON_MSR_LOAD_FAIL;
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
 					    vmcs12->vm_entry_msr_load_count);
-	if (msr_entry_idx) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-		return 1;
-	}
+	if (msr_entry_idx)
+		goto fail;
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11464,6 +11468,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 0;
+
+fail:
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+	leave_guest_mode(vcpu);
+	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+	return 1;
 }
 
 /*
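
Taken together, these hunks keep vcpu->arch.tsc_offset equal to the offset of whichever guest is currently running: nested VM-entry adds vmcs12->tsc_offset, and the fail path (like nested_vmx_vmexit() below) subtracts it back. A standalone illustration of that bookkeeping, using made-up offset values:

/* Standalone demo of the tsc_offset arithmetic above.
 * The offset values are hypothetical; only the invariant matters. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t l1_offset  = 1000;	/* offset L0 programs for L1 */
	uint64_t l12_offset = 250;	/* offset L1 programs for L2 (vmcs12->tsc_offset) */
	uint64_t arch_tsc_offset = l1_offset;	/* vcpu->arch.tsc_offset while L1 runs */

	/* enter_vmx_non_root_mode(): L2's effective offset is the sum */
	arch_tsc_offset += l12_offset;

	/* vmx_read_l1_tsc_offset() in guest mode: recover L1's view */
	assert(arch_tsc_offset - l12_offset == l1_offset);

	/* failed entry or nested_vmx_vmexit(): restore L1's offset */
	arch_tsc_offset -= l12_offset;
	assert(arch_tsc_offset == l1_offset);
	return 0;
}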
@@ -12035,6 +12047,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
 	leave_guest_mode(vcpu);
 
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
 	if (likely(!vmx->fail)) {
 		if (exit_reason == -1)
 			sync_vmcs12(vcpu, vmcs12);
@@ -12725,6 +12740,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
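
Since read_l1_tsc_offset becomes a kvm_x86_ops member, SVM needs an equivalent hook; svm.c is not shown in this excerpt. A plausible counterpart, assuming the nested hsave area caches L1's control.tsc_offset as nested SVM does for other control fields:

/* Sketch of the SVM-side hook (svm.c is not part of this excerpt);
 * the nested.hsave field names follow the existing nested-SVM layout
 * but are unverified against this exact tree. */
static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu))
		return svm->nested.hsave->control.tsc_offset;

	return vcpu->arch.tsc_offset;
}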