BACKPORT: KVM: arm64: Refactor exit handlers
[ Upstream commit 9b66195063c5a145843547b1d692bd189be85287 ]

The hyp exit handling logic is largely shared between VHE and nVHE/hVHE,
with common logic in arch/arm64/kvm/hyp/include/hyp/switch.h. The code in
the header depends on function definitions provided by
arch/arm64/kvm/hyp/vhe/switch.c and arch/arm64/kvm/hyp/nvhe/switch.c when
they include the header.

This is an unusual header dependency, and prevents the use of
arch/arm64/kvm/hyp/include/hyp/switch.h in other files as this would
result in compiler warnings regarding missing definitions, e.g.

| In file included from arch/arm64/kvm/hyp/nvhe/hyp-main.c:8:
| ./arch/arm64/kvm/hyp/include/hyp/switch.h:733:31: warning: 'kvm_get_exit_handler_array' used but never defined
|   733 | static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
|       |                               ^~~~~~~~~~~~~~~~~~~~~~~~~~
| ./arch/arm64/kvm/hyp/include/hyp/switch.h:735:13: warning: 'early_exit_filter' used but never defined
|   735 | static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
|       |             ^~~~~~~~~~~~~~~~~

Refactor the logic such that the header doesn't depend on anything from
the C files.

There should be no functional change as a result of this patch.

Bug: 411040189
Change-Id: I4e58bad80763afd73fd03f9653ed4e66dfe97255
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-7-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
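To make the dependency inversion concrete, here is a minimal standalone
sketch of the pattern this patch moves to. It is not kernel code: the names
handler_fn, dispatch_exit, handle_code_zero and the table contents are
invented for illustration; only the "pass the handler table as a parameter
instead of declaring a provider function in the header" shape mirrors the
patch.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  typedef bool (*handler_fn)(int code);

  /*
   * Before (in miniature): a shared header would declare
   *     static const handler_fn *get_handler_array(void);
   * and rely on each including C file to define it, so any other
   * includer got "used but never defined" warnings.
   *
   * After: the shared helper takes the table as an explicit
   * parameter, and the header no longer depends on its includers.
   */
  static bool dispatch_exit(int code, const handler_fn *handlers,
  			  size_t nr_handlers)
  {
  	handler_fn fn = NULL;

  	if (code >= 0 && (size_t)code < nr_handlers)
  		fn = handlers[code];

  	if (fn)
  		return fn(code);

  	return false;
  }

  static bool handle_code_zero(int code)
  {
  	printf("handled exit code %d\n", code);
  	return true;
  }

  int main(void)
  {
  	/* Each mode (cf. VHE vs. nVHE) supplies its own table. */
  	const handler_fn my_handlers[] = { handle_code_zero };

  	return dispatch_exit(0, my_handlers, 1) ? 0 : 1;
  }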
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -660,23 +660,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
-
-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
-
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
  * Returns true if the hypervisor handled the exit, and control should go back
  * to the guest, or false if it hasn't.
  */
-static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
+				       const exit_handler_fn *handlers)
 {
-	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
-	exit_handler_fn fn;
-
-	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
-
+	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
 	if (fn)
 		return fn(vcpu, exit_code);
 
@@ -706,20 +699,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
  * the guest, false when we should restore the host state and return to the
  * main run loop.
  */
-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
+				      const exit_handler_fn *handlers)
 {
-	/*
-	 * Save PSTATE early so that we can evaluate the vcpu mode
-	 * early on.
-	 */
-	synchronize_vcpu_pstate(vcpu, exit_code);
-
-	/*
-	 * Check whether we want to repaint the state one way or
-	 * another.
-	 */
-	early_exit_filter(vcpu, exit_code);
-
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
@@ -749,7 +731,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 		goto exit;
 
 	/* Check if there's an exit handler and allow it to handle the exit. */
-	if (kvm_hyp_handle_exit(vcpu, exit_code))
+	if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
 		goto guest;
 exit:
 	/* Return to the host kernel and handle the exit */
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -288,20 +288,23 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 	return hyp_exit_handlers;
 }
 
-/*
- * Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
- * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
- * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
- * hypervisor spots a guest in such a state ensure it is handled, and don't
- * trust the host to spot or fix it. The check below is based on the one in
- * kvm_arch_vcpu_ioctl_run().
- *
- * Returns false if the guest ran in AArch32 when it shouldn't have, and
- * thus should exit to the host, or true if a the guest run loop can continue.
- */
-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
+	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+
+	synchronize_vcpu_pstate(vcpu, exit_code);
+
+	/*
+	 * Some guests (e.g., protected VMs) are not be allowed to run in
+	 * AArch32. The ARMv8 architecture does not give the hypervisor a
+	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
+	 * implemented by the CPU. If the hypervisor spots a guest in such a
+	 * state ensure it is handled, and don't trust the host to spot or fix
+	 * it. The check below is based on the one in
+	 * kvm_arch_vcpu_ioctl_run().
+	 */
+	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
 		/*
 		 * As we have caught the guest red-handed, decide that it isn't
 		 * fit for purpose anymore by making the vcpu invalid. The VMM
@@ -313,6 +316,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
 	}
+
+	return __fixup_guest_exit(vcpu, exit_code, handlers);
 }
 
 /* Switch to the guest for legacy non-VHE systems */
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -175,13 +175,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	return hyp_exit_handlers;
-}
-
-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
-{
+	synchronize_vcpu_pstate(vcpu, exit_code);
+
 	/*
 	 * If we were in HYP context on entry, adjust the PSTATE view
 	 * so that the usual helpers work correctly.
@@ -201,6 +198,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 		*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
 		*vcpu_cpsr(vcpu) |= mode;
 	}
+
+	return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
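Taken together, the hunks above leave each mode with a thin wrapper around
the shared helper. Condensed from the diff (elisions marked with "..."),
the resulting flow is:

  /* vhe/switch.c */
  static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  {
  	synchronize_vcpu_pstate(vcpu, exit_code);
  	/* ... repaint PSTATE if we entered in HYP context ... */
  	return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
  }

  /* nvhe/switch.c */
  static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  {
  	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
  	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

  	synchronize_vcpu_pstate(vcpu, exit_code);
  	/* ... invalidate a protected guest caught running in AArch32 ... */
  	return __fixup_guest_exit(vcpu, exit_code, handlers);
  }

__fixup_guest_exit() then dispatches through the table it was handed, via
kvm_hyp_handle_exit(vcpu, exit_code, handlers), instead of calling back into
whichever C file included the header.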