ANDROID: KVM: arm64: Move kvm_hyp_handle_fpsimd_host() to switch.h

Move kvm_hyp_handle_fpsimd_host() to the shared switch header, instead
of having separate implementations in the vhe/nvhe switch.c files.
Subsequent patches will remove all specific implementations from
switch.c and include switch.h in other files.

Bug: 411040189
Change-Id: I07f1d92f96b072435ded5f0b84a446df4e6a81ab
Signed-off-by: Fuad Tabba <tabba@google.com>
Author: Fuad Tabba <tabba@google.com>
Date:   2025-05-07 12:40:11 +01:00
Commit: 290f5d0002 (parent 6fc7e69888)

4 changed files with 28 additions and 31 deletions
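Before the diff itself, a minimal self-contained sketch of the pattern this change relies on. The names here (NVHE_BUILD, struct vcpu, handle_fpsimd_host, the toy state types) are illustrative stand-ins, not the kernel's actual types or API: the handler is written once in shared code, while its host-state accessor resolves to a real function in one build and to a plain macro in the other, mirroring the __KVM_NVHE_HYPERVISOR__ split in the first hunk below.

#include <stdio.h>

struct fpsimd_state { int regs; };

struct vcpu {
	struct fpsimd_state *host_fpsimd_state;
};

#ifdef NVHE_BUILD
/* "nVHE"-like build: the accessor is a real function with its own logic. */
static struct fpsimd_state *get_host_fpsimd_state(struct vcpu *vcpu)
{
	printf("nVHE path: accessor called as a function\n");
	return vcpu->host_fpsimd_state;
}
#else
/* "VHE"-like build: the accessor collapses to a plain field access. */
#define get_host_fpsimd_state(vcpu) ((vcpu)->host_fpsimd_state)
#endif

/* Shared definition: written once, compiled into either configuration. */
static void handle_fpsimd_host(struct vcpu *vcpu)
{
	struct fpsimd_state *st = get_host_fpsimd_state(vcpu);

	printf("saving host FPSIMD state at %p\n", (void *)st);
}

int main(void)
{
	struct fpsimd_state st = { 0 };
	struct vcpu v = { .host_fpsimd_state = &st };

	handle_fpsimd_host(&v);
	return 0;
}

Building the same file with and without -DNVHE_BUILD exercises the two paths, which is essentially how a single kvm_hyp_handle_fpsimd_host() definition in switch.h can serve both the nVHE and VHE switch code.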

File 1 of 4:

@@ -142,6 +142,9 @@ void __hyp_exit(void);
 #ifdef __KVM_NVHE_HYPERVISOR__
 struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu);
 struct kvm_host_sve_state *get_host_sve_state(struct kvm_vcpu *vcpu);
+#else
+#define get_host_fpsimd_state(vcpu) (vcpu)->arch.host_fpsimd_state
+#define get_host_sve_state(vcpu) NULL
 #endif
 
 extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);

File 2 of 4:

@@ -26,6 +26,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/kvm_nested.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
@@ -303,7 +304,30 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1ULL, SYS_ZCR_EL2);
 }
 
-static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu);
+static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Non-protected kvm relies on the host restoring its sve state.
+	 * Protected kvm restores the host's sve state as not to reveal that
+	 * fpsimd was used by a guest nor leak upper sve bits.
+	 */
+	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+		struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
+		u64 zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
+
+		sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
+		sve_cond_update_zcr_vq(zcr_el2, SYS_ZCR_EL2);
+		__sve_save_state(sve_state->sve_regs +
+				 sve_ffr_offset(kvm_host_sve_max_vl),
+				 &sve_state->fpsr);
+
+		/* Still trap SVE since it's handled by hyp in pKVM. */
+		if (!vcpu_has_sve(vcpu))
+			sysreg_clear_set(cptr_el2, 0, CPTR_EL2_TZ);
+	} else {
+		__fpsimd_save_state(get_host_fpsimd_state(vcpu));
+	}
+}
 
 static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu);

File 3 of 4:

@@ -262,31 +262,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 		kvm_handle_pvm_sysreg(vcpu, exit_code));
 }
 
-static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-	/*
-	 * Non-protected kvm relies on the host restoring its sve state.
-	 * Protected kvm restores the host's sve state as not to reveal that
-	 * fpsimd was used by a guest nor leak upper sve bits.
-	 */
-	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
-		struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
-		u64 zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
-
-		sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
-		sve_cond_update_zcr_vq(zcr_el2, SYS_ZCR_EL2);
-		__sve_save_state(sve_state->sve_regs +
-				 sve_ffr_offset(kvm_host_sve_max_vl),
-				 &sve_state->fpsr);
-
-		/* Still trap SVE since it's handled by hyp in pKVM. */
-		if (!vcpu_has_sve(vcpu))
-			sysreg_clear_set(cptr_el2, 0, CPTR_EL2_TZ);
-	} else {
-		__fpsimd_save_state(get_host_fpsimd_state(vcpu));
-	}
-}
-
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX] = NULL,
 	[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,

File 4 of 4:

@@ -173,11 +173,6 @@ static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu)
 	sysreg_clear_set(cpacr_el1, 0, reg);
 }
 
-static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-	__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-}
-
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX] = NULL,
 	[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,