x86/bugs: Rename MDS machinery to something more generic

Commit f9af88a3d384c8b55beb5dc5483e5da0135fadbd upstream.

It will be used by other x86 mitigations.

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
8 changed files with 36 additions and 36 deletions


@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.
 
-Kernel reuses the MDS function to invoke the buffer clearing:
-
-  mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
 
 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No


@@ -93,7 +93,7 @@ enters a C-state.
 The kernel provides a function to invoke the buffer clearing:
 
-  mds_clear_cpu_buffers()
+  x86_clear_cpu_buffers()
 
 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
 idle clearing would be a window dressing exercise and is therefore not
 activated.
 
-The invocation is controlled by the static key mds_idle_clear which is
-switched depending on the chosen mitigation mode and the SMT state of
-the system.
+The invocation is controlled by the static key cpu_buf_idle_clear which is
+switched depending on the chosen mitigation mode and the SMT state of the
+system.
 
 The buffer clear is only invoked before entering the C-State to prevent
 that stale data from the idling CPU from spilling to the Hyper-Thread


@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
 /*
  * Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
  * used late in exit-to-user path after page tables are switched.
  */
 .pushsection .entry.text, "ax"
 
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
 	UNWIND_HINT_UNDEFINED
 	ANNOTATE_NOENDBR
 	.word __KERNEL_DS
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
 
 /* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);
 
 .popsection


@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
 static __always_inline void native_safe_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }
 
 static __always_inline void native_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }


@@ -44,7 +44,7 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx,
 static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 
 	/* "mwait %eax, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xc9;"
@@ -89,7 +89,8 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
 static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
+
 	/* "mwait %eax, %ecx;" */
 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
 		     :: "a" (eax), "c" (ecx));


@@ -324,22 +324,22 @@
 .endm
 
 /*
- * Macro to execute VERW instruction that mitigate transient data sampling
- * attacks such as MDS. On affected systems a microcode update overloaded VERW
- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
- *
+ * Macro to execute VERW insns that mitigate transient data sampling
+ * attacks such as MDS or TSA. On affected systems a microcode update
+ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+ * CFLAGS.ZF.
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
 .macro CLEAR_CPU_BUFFERS
 #ifdef CONFIG_X86_64
-	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
 #else
 	/*
 	 * In 32bit mode, the memory operand must be a %cs reference. The data
 	 * segments may not be usable (vm86 mode), and the stack segment may not
 	 * be flat (ESPFIX32).
 	 */
-	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
 #endif
 .endm
@@ -592,24 +592,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 
-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;
 
 #include <asm/segment.h>
 
 /**
- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
  *
  * This uses the otherwise unused and obsolete VERW instruction in
  * combination with microcode which triggers a CPU buffer flush when the
  * instruction is executed.
  */
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
 {
 	static const u16 ds = __KERNEL_DS;
@@ -626,14 +626,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
 }
 
 /**
- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+ * vulnerability
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
-static __always_inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
 {
-	if (static_branch_likely(&mds_idle_clear))
-		mds_clear_cpu_buffers();
+	if (static_branch_likely(&cpu_buf_idle_clear))
+		x86_clear_cpu_buffers();
 }
 
 #endif /* __ASSEMBLY__ */


@@ -122,9 +122,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
 
 /*
  * Controls whether l1d flush based mitigations are enabled,
@@ -445,7 +445,7 @@ static void __init mmio_select_mitigation(void)
 	 * is required irrespective of SMT state.
 	 */
 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 
 	/*
 	 * Check if the system has the right microcode.
@@ -2082,10 +2082,10 @@ static void update_mds_branch_idle(void)
 		return;
 
 	if (sched_smt_active()) {
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-		static_branch_disable(&mds_idle_clear);
+		static_branch_disable(&cpu_buf_idle_clear);
 	}
 }


@@ -7263,7 +7263,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		vmx_l1d_flush(vcpu);
 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
 		 kvm_arch_has_assigned_device(vcpu->kvm))
-		mds_clear_cpu_buffers();
+		x86_clear_cpu_buffers();
 
 	vmx_disable_fb_clear(vmx);