Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 27fe199

Browse files
mrutland-arm authored and kelmously committed
KVM: arm/arm64: Correct AArch32 SPSR on exception entry
BugLink: https://bugs.launchpad.net/bugs/1867051

commit 1cfbb48 upstream.

Confusingly, there are three SPSR layouts that a kernel may need to deal with:

 (1) An AArch64 SPSR_ELx view of an AArch64 pstate
 (2) An AArch64 SPSR_ELx view of an AArch32 pstate
 (3) An AArch32 SPSR_* view of an AArch32 pstate

When the KVM AArch32 support code deals with SPSR_{EL2,HYP}, it's either dealing with #2 or #3 consistently. On arm64 the PSR_AA32_* definitions match the AArch64 SPSR_ELx view, and on arm the PSR_AA32_* definitions match the AArch32 SPSR_* view.

However, when we inject an exception into an AArch32 guest, we have to synthesize the AArch32 SPSR_* that the guest will see. Thus, an AArch64 host needs to synthesize layout #3 from layout #2.

This patch adds a new host_spsr_to_spsr32() helper for this, and makes use of it in the KVM AArch32 support code.

For arm64 we need to shuffle the DIT bit around, and remove the SS bit, while for arm we can use the value as-is.

I've open-coded the bit manipulation for now to avoid having to rework the existing PSR_* definitions into PSR64_AA32_* and PSR32_AA32_* definitions. I hope to perform a more thorough refactoring in future so that we can handle pstate view manipulation more consistently across the kernel tree.

Signed-off-by: Mark Rutland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Reviewed-by: Alexandru Elisei <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Kamal Mostafa <[email protected]>
Signed-off-by: Khalid Elmously <[email protected]>
1 parent 13a322b commit 27fe199

File tree

3 files changed

+40
-3
lines changed

3 files changed

+40
-3
lines changed

arch/arm/include/asm/kvm_emulate.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
5353
*__vcpu_spsr(vcpu) = v;
5454
}
5555

56+
/*
 * Convert the host's view of an AArch32 SPSR into the guest-visible
 * AArch32 SPSR_* layout.
 *
 * On 32-bit arm the PSR_AA32_* definitions already match the AArch32
 * SPSR_* layout, so the value can be used as-is (contrast with the
 * arm64 variant of this helper, which must move the DIT bit).
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	return spsr;
}
60+
5661
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
5762
u8 reg_num)
5863
{

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
204204
vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
205205
}
206206

207+
/*
208+
* The layout of SPSR for an AArch32 state is different when observed from an
209+
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
210+
* view given an AArch64 view.
211+
*
212+
* In ARM DDI 0487E.a see:
213+
*
214+
* - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
215+
* - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
216+
* - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
217+
*
218+
* Which show the following differences:
219+
*
220+
* | Bit | AA64 | AA32 | Notes |
221+
* +-----+------+------+-----------------------------|
222+
* | 24 | DIT | J | J is RES0 in ARMv8 |
223+
* | 21 | SS | DIT | SS doesn't exist in AArch32 |
224+
*
225+
* ... and all other bits are (currently) common.
226+
*/
227+
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
228+
{
229+
const unsigned long overlap = BIT(24) | BIT(21);
230+
unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);
231+
232+
spsr &= ~overlap;
233+
234+
spsr |= dit << 21;
235+
236+
return spsr;
237+
}
238+
207239
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
208240
{
209241
u32 mode;

virt/kvm/arm/aarch32.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,15 +129,15 @@ static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
129129

130130
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
131131
{
132-
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
133-
bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
132+
unsigned long spsr = *vcpu_cpsr(vcpu);
133+
bool is_thumb = (spsr & PSR_AA32_T_BIT);
134134
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
135135
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
136136

137137
*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
138138

139139
/* Note: These now point to the banked copies */
140-
vcpu_write_spsr(vcpu, new_spsr_value);
140+
vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
141141
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
142142

143143
/* Branch to exception vector */

0 commit comments

Comments (0)