Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 7d86270

Browse files
committed
Merge tag 'kvmarm-fixes-6.17-2' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 changes for 6.17, round #3

- Invalidate nested MMUs upon freeing the PGD to avoid WARNs when visited from an MMU notifier
- Fixes to the TLB match process and TLB invalidation range for managing the VNCR pseudo-TLB
- Prevent SPE from erroneously profiling guests due to UNKNOWN reset values in PMSCR_EL1
- Fix save/restore of host MDCR_EL2 to account for eagerly programming at vcpu_load() on VHE systems
- Correct lock ordering when dealing with VGIC LPIs, avoiding scenarios where an xarray's spinlock was nested with a *raw* spinlock
- Permit stage-2 read permission aborts which are possible in the case of NV depending on the guest hypervisor's stage-2 translation
- Call raw_spin_unlock() instead of the internal spinlock API
- Fix parameter ordering when assigning VBAR_EL1
2 parents f83ec76 + e615725 commit 7d86270

File tree

19 files changed

+120
-154
lines changed

19 files changed

+120
-154
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1369,6 +1369,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
13691369
}
13701370

13711371
void kvm_init_host_debug_data(void);
1372+
void kvm_debug_init_vhe(void);
13721373
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
13731374
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
13741375
void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -355,11 +355,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
355355
return pteref;
356356
}
357357

358-
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
359-
{
360-
return pteref;
361-
}
362-
363358
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
364359
{
365360
/*
@@ -389,11 +384,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
389384
return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
390385
}
391386

392-
static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
393-
{
394-
return rcu_dereference_raw(pteref);
395-
}
396-
397387
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
398388
{
399389
if (walker->flags & KVM_PGTABLE_WALK_SHARED)
@@ -561,26 +551,6 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2
561551
*/
562552
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
563553

564-
/**
565-
* kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
566-
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
567-
* @addr: Intermediate physical address at which to place the mapping.
568-
* @size: Size of the mapping.
569-
*
570-
* The page-table is assumed to be unreachable by any hardware walkers prior
571-
* to freeing and therefore no TLB invalidation is performed.
572-
*/
573-
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
574-
u64 addr, u64 size);
575-
576-
/**
577-
* kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
578-
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
579-
*
580-
* It is assumed that the rest of the page-table is freed before this operation.
581-
*/
582-
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
583-
584554
/**
585555
* kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
586556
* @mm_ops: Memory management callbacks.

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -179,9 +179,7 @@ struct pkvm_mapping {
179179

180180
int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
181181
struct kvm_pgtable_mm_ops *mm_ops);
182-
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
183-
u64 addr, u64 size);
184-
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
182+
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
185183
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
186184
enum kvm_pgtable_prot prot, void *mc,
187185
enum kvm_pgtable_walk_flags flags);

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2113,8 +2113,10 @@ static void cpu_hyp_init_features(void)
21132113
{
21142114
cpu_set_hyp_vector();
21152115

2116-
if (is_kernel_in_hyp_mode())
2116+
if (is_kernel_in_hyp_mode()) {
21172117
kvm_timer_init_vhe();
2118+
kvm_debug_init_vhe();
2119+
}
21182120

21192121
if (vgic_present)
21202122
kvm_vgic_init_cpu_hardware();

arch/arm64/kvm/debug.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,13 @@ void kvm_init_host_debug_data(void)
9696
}
9797
}
9898

99+
void kvm_debug_init_vhe(void)
100+
{
101+
/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
102+
if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
103+
write_sysreg_el1(0, SYS_PMSCR);
104+
}
105+
99106
/*
100107
* Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
101108
* has taken over MDSCR_EL1.
@@ -138,6 +145,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
138145
/* Must be called before kvm_vcpu_load_vhe() */
139146
KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm);
140147

148+
if (has_vhe())
149+
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
150+
141151
/*
142152
* Determine which of the possible debug states we're in:
143153
*
@@ -184,6 +194,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
184194

185195
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu)
186196
{
197+
if (has_vhe())
198+
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
199+
187200
if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
188201
return;
189202

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -431,9 +431,6 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
431431
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
432432
}
433433

434-
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
435-
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
436-
437434
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
438435
u64 hcrx = vcpu->arch.hcrx_el2;
439436
if (is_nested_ctxt(vcpu)) {
@@ -454,8 +451,6 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
454451
{
455452
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
456453

457-
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
458-
459454
write_sysreg(0, hstr_el2);
460455
if (system_supports_pmuv3()) {
461456
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,10 @@ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
5050
static void __activate_traps(struct kvm_vcpu *vcpu)
5151
{
5252
___activate_traps(vcpu, vcpu->arch.hcr_el2);
53+
54+
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
55+
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
56+
5357
__activate_traps_common(vcpu);
5458
__activate_cptr_traps(vcpu);
5559

@@ -93,6 +97,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
9397
isb();
9498
}
9599

100+
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
101+
96102
__deactivate_traps_common(vcpu);
97103

98104
write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);

arch/arm64/kvm/hyp/nvhe/sys_regs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
253253

254254
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
255255
*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
256-
__vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1);
256+
__vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR));
257257

258258
kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
259259

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 4 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1551,38 +1551,21 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
15511551
return 0;
15521552
}
15531553

1554-
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
1555-
u64 addr, u64 size)
1554+
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
15561555
{
1556+
size_t pgd_sz;
15571557
struct kvm_pgtable_walker walker = {
15581558
.cb = stage2_free_walker,
15591559
.flags = KVM_PGTABLE_WALK_LEAF |
15601560
KVM_PGTABLE_WALK_TABLE_POST,
15611561
};
15621562

1563-
WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1564-
}
1565-
1566-
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
1567-
{
1568-
size_t pgd_sz;
1569-
1563+
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
15701564
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1571-
1572-
/*
1573-
* Since the pgtable is unlinked at this point, and not shared with
1574-
* other walkers, safely dereference pgd with kvm_dereference_pteref_raw()
1575-
*/
1576-
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
1565+
pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
15771566
pgt->pgd = NULL;
15781567
}
15791568

1580-
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1581-
{
1582-
kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
1583-
kvm_pgtable_stage2_destroy_pgd(pgt);
1584-
}
1585-
15861569
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
15871570
{
15881571
kvm_pteref_t ptep = (kvm_pteref_t)pgtable;

arch/arm64/kvm/mmu.c

Lines changed: 6 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -904,38 +904,6 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
904904
return 0;
905905
}
906906

907-
/*
908-
* Assume that @pgt is valid and unlinked from the KVM MMU to free the
909-
* page-table without taking the kvm_mmu_lock and without performing any
910-
* TLB invalidations.
911-
*
912-
* Also, the range of addresses can be large enough to cause need_resched
913-
* warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke
914-
* cond_resched() periodically to prevent hogging the CPU for a long time
915-
* and schedule something else, if required.
916-
*/
917-
static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr,
918-
phys_addr_t end)
919-
{
920-
u64 next;
921-
922-
do {
923-
next = stage2_range_addr_end(addr, end);
924-
KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr,
925-
next - addr);
926-
if (next != end)
927-
cond_resched();
928-
} while (addr = next, addr != end);
929-
}
930-
931-
static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
932-
{
933-
unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
934-
935-
stage2_destroy_range(pgt, 0, BIT(ia_bits));
936-
KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
937-
}
938-
939907
/**
940908
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
941909
* @kvm: The pointer to the KVM structure
@@ -1012,7 +980,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
1012980
return 0;
1013981

1014982
out_destroy_pgtable:
1015-
kvm_stage2_destroy(pgt);
983+
KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
1016984
out_free_pgtable:
1017985
kfree(pgt);
1018986
return err;
@@ -1106,10 +1074,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
11061074
mmu->pgt = NULL;
11071075
free_percpu(mmu->last_vcpu_ran);
11081076
}
1077+
1078+
if (kvm_is_nested_s2_mmu(kvm, mmu))
1079+
kvm_init_nested_s2_mmu(mmu);
1080+
11091081
write_unlock(&kvm->mmu_lock);
11101082

11111083
if (pgt) {
1112-
kvm_stage2_destroy(pgt);
1084+
KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
11131085
kfree(pgt);
11141086
}
11151087
}
@@ -1541,11 +1513,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15411513
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
15421514
VM_BUG_ON(write_fault && exec_fault);
15431515

1544-
if (fault_is_perm && !write_fault && !exec_fault) {
1545-
kvm_err("Unexpected L2 read permission error\n");
1546-
return -EFAULT;
1547-
}
1548-
15491516
if (!is_protected_kvm_enabled())
15501517
memcache = &vcpu->arch.mmu_page_cache;
15511518
else

0 commit comments

Comments
 (0)