@@ -2530,7 +2530,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 }
 
 #define GHCB_SCRATCH_AREA_LIMIT	(16ULL * PAGE_SIZE)
-static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
+static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct ghcb *ghcb = svm->sev_es.ghcb;
@@ -2541,14 +2541,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
 	if (!scratch_gpa_beg) {
 		pr_err("vmgexit: scratch gpa not provided\n");
-		return false;
+		return -EINVAL;
 	}
 
 	scratch_gpa_end = scratch_gpa_beg + len;
 	if (scratch_gpa_end < scratch_gpa_beg) {
 		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
 		       len, scratch_gpa_beg);
-		return false;
+		return -EINVAL;
 	}
 
 	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2566,7 +2566,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		    scratch_gpa_end > ghcb_scratch_end) {
 			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
 			       scratch_gpa_beg, scratch_gpa_end);
-			return false;
+			return -EINVAL;
 		}
 
 		scratch_va = (void *)svm->sev_es.ghcb;
@@ -2579,18 +2579,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		if (len > GHCB_SCRATCH_AREA_LIMIT) {
 			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
 			       len, GHCB_SCRATCH_AREA_LIMIT);
-			return false;
+			return -EINVAL;
 		}
 		scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
 		if (!scratch_va)
-			return false;
+			return -ENOMEM;
 
 		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
 			/* Unable to copy scratch area from guest */
 			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
 
 			kfree(scratch_va);
-			return false;
+			return -EFAULT;
 		}
 
 		/*
@@ -2606,7 +2606,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	svm->sev_es.ghcb_sa = scratch_va;
 	svm->sev_es.ghcb_sa_len = len;
 
-	return true;
+	return 0;
 }
 
 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
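The hunks above convert setup_vmgexit_scratch() from returning a bool to returning 0 on success or a negative errno: -EINVAL for malformed GHCB scratch parameters, -ENOMEM when the kernel buffer cannot be allocated, and -EFAULT when the guest copy fails. Callers can then report the actual cause instead of one generic failure. The following standalone sketch (hypothetical names, userspace, not kernel code) illustrates the same 0-or-negative-errno convention:

/* Hypothetical userspace illustration of the 0 / -errno convention used above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SCRATCH_LIMIT	(16UL * 4096)	/* mirrors GHCB_SCRATCH_AREA_LIMIT */

/* Stand-in for setup_vmgexit_scratch(): 0 on success, negative errno on failure. */
static int setup_scratch(size_t len, void **out)
{
	if (!len || len > SCRATCH_LIMIT)
		return -EINVAL;		/* bad size requested by the caller */

	*out = calloc(1, len);		/* plays the role of kzalloc() */
	if (!*out)
		return -ENOMEM;		/* allocation failure is now distinguishable */

	return 0;
}

int main(void)
{
	void *scratch;
	int ret = setup_scratch(4096, &scratch);

	if (ret) {
		/* distinct causes stay distinct all the way up */
		fprintf(stderr, "setup failed: %s\n", strerror(-ret));
		return 1;
	}
	free(scratch);
	return 0;
}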
@@ -2745,10 +2745,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	ghcb_set_sw_exit_info_1(ghcb, 0);
 	ghcb_set_sw_exit_info_2(ghcb, 0);
 
-	ret = -EINVAL;
 	switch (exit_code) {
 	case SVM_VMGEXIT_MMIO_READ:
-		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
+		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
+		if (ret)
 			break;
 
 		ret = kvm_sev_es_mmio_read(vcpu,
@@ -2757,7 +2757,8 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 					   svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_MMIO_WRITE:
-		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
+		ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
+		if (ret)
 			break;
 
 		ret = kvm_sev_es_mmio_write(vcpu,
@@ -2800,6 +2801,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		vcpu_unimpl(vcpu,
 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
 			    control->exit_info_1, control->exit_info_2);
+		ret = -EINVAL;
 		break;
 	default:
 		ret = svm_invoke_exit_handler(vcpu, exit_code);
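With the blanket ret = -EINVAL; before the switch removed, each case in sev_handle_vmgexit() now sets ret itself: the MMIO cases forward whatever setup_vmgexit_scratch() returned, and the unsupported-event case assigns -EINVAL explicitly. A minimal sketch of this per-case error-flow pattern (hypothetical names, not kernel code):

/* Hypothetical sketch: each switch case owns its own error code. */
#include <errno.h>
#include <stdio.h>

/* Pretend helper: 0 on success, negative errno on failure. */
static int setup_helper(int should_fail)
{
	return should_fail ? -EFAULT : 0;
}

static int handle_event(int event, int should_fail)
{
	int ret;

	switch (event) {
	case 1:				/* analogous to an MMIO exit */
		ret = setup_helper(should_fail);
		if (ret)
			break;		/* propagate -EFAULT, not a generic -EINVAL */
		ret = 0;		/* the real handler would run here */
		break;
	default:
		ret = -EINVAL;		/* unsupported event, set explicitly */
		break;
	}

	return ret;
}

int main(void)
{
	printf("ok=%d fail=%d unknown=%d\n",
	       handle_event(1, 0), handle_event(1, 1), handle_event(2, 0));
	return 0;
}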
@@ -2812,6 +2814,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 {
 	int count;
 	int bytes;
+	int r;
 
 	if (svm->vmcb->control.exit_info_2 > INT_MAX)
 		return -EINVAL;
@@ -2820,8 +2823,9 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 	if (unlikely(check_mul_overflow(count, size, &bytes)))
 		return -EINVAL;
 
-	if (!setup_vmgexit_scratch(svm, in, bytes))
-		return -EINVAL;
+	r = setup_vmgexit_scratch(svm, in, bytes);
+	if (r)
+		return r;
 
 	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
 				    count, in);
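Likewise, sev_es_string_io() now returns whatever setup_vmgexit_scratch() reports instead of rewriting every failure to -EINVAL. A small sketch of the overflow-checked size computation plus unchanged error propagation, using the GCC/Clang __builtin_mul_overflow builtin that the kernel's check_mul_overflow() is built on (hypothetical names again):

/* Hypothetical sketch: overflow-checked sizing plus errno propagation. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* Stand-in for setup_vmgexit_scratch(); pretend the guest copy failed when oversized. */
static int setup_scratch(int bytes)
{
	return bytes > 16 * 4096 ? -EFAULT : 0;
}

static int string_io(long long count, int size)
{
	int bytes;
	int r;

	if (count > INT_MAX)
		return -EINVAL;

	/* check_mul_overflow()-style guard against count * size wrapping. */
	if (__builtin_mul_overflow((int)count, size, &bytes))
		return -EINVAL;

	r = setup_scratch(bytes);
	if (r)
		return r;		/* forward the helper's errno unchanged */

	return 0;			/* the real string I/O emulation would follow */
}

int main(void)
{
	/* prints "ok=0 propagated=-14": the -EFAULT reaches the caller intact */
	printf("ok=%d propagated=%d\n", string_io(16, 4), string_io(64, 4096));
	return 0;
}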