x86/bugs: Add a Transient Scheduler Attacks mitigation

Commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream.

Add the required feature detection glue to bugs.c et al. in order to
support the TSA mitigation.

Co-developed-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 15 files changed, 258 insertions(+), 18 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu

@@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/srbds
+		/sys/devices/system/cpu/vulnerabilities/tsa
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>

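Once the patch is applied, the new node reads like the other entries in that directory. A minimal userspace reader (illustrative only, not part of the patch; the reported strings come from tsa_strings[] in the bugs.c hunk further down, or "Not affected" when X86_BUG_TSA is not set):

  #include <stdio.h>

  int main(void)
  {
      char line[128];
      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

      if (!f) {
          perror("tsa");  /* pre-TSA kernel: the node does not exist */
          return 1;
      }
      /* Prints e.g. "Mitigation: Clear CPU buffers" */
      if (fgets(line, sizeof(line), f))
          fputs(line, stdout);
      fclose(f);
      return 0;
  }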
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt

@@ -6645,6 +6645,19 @@
 			If not specified, "default" is used. In this case,
 			the RNG's choice is left to each individual trust source.
 
+	tsa=		[X86] Control mitigation for Transient Scheduler
+			Attacks on AMD CPUs. Search the following in your
+			favourite search engine for more details:
+
+			"Technical guidance for mitigating transient scheduler
+			attacks".
+
+			off		- disable the mitigation
+			on		- enable the mitigation (default)
+			user		- mitigate only user/kernel transitions
+			vm		- mitigate only guest/host transitions
+
 	tsc=		Disable clocksource stability checks for TSC.
 			Format: <string>
 			[x86] reliable: mark tsc clocksource as reliable, this

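As an illustration, a hypothetical boot entry restricting the mitigation to the user/kernel boundary (everything except the tsa= token is incidental):

  linux /boot/vmlinuz root=/dev/sda1 ro tsa=user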
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig

@@ -2621,6 +2621,15 @@ config MITIGATION_ITS
 	  disabled, mitigation cannot be enabled via cmdline.
 	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
 
+config MITIGATION_TSA
+	bool "Mitigate Transient Scheduler Attacks"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
+	  security vulnerability on AMD CPUs which can lead to forwarding of
+	  invalid info to subsequent instructions and thus can affect their
+	  timing and thereby cause a leakage.
+
 endif
 
 config ARCH_HAS_ADD_PAGES

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h

@@ -81,4 +81,16 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
 extern struct cpumask cpus_stop_mask;
 
+union zen_patch_rev {
+	struct {
+		__u32 rev	 : 8,
+		      stepping	 : 4,
+		      model	 : 4,
+		      __reserved : 4,
+		      ext_model	 : 4,
+		      ext_fam	 : 8;
+	};
+	__u32 ucode_rev;
+};
+
 #endif /* _ASM_X86_CPU_H */

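The union overlays a Zen microcode patch level with the family/model/stepping fields encoded in it; amd_check_tsa_microcode() below builds such a value from boot_cpu_data and compares it against a per-prefix table. A standalone sketch of the decomposition (userspace mirror of the struct above; assumes the low-bits-first bitfield layout GCC/Clang use on x86):

  #include <stdio.h>

  union zen_patch_rev {              /* mirror of the kernel definition */
      struct {
          unsigned int rev        : 8,
                       stepping   : 4,
                       model      : 4,
                       __reserved : 4,
                       ext_model  : 4,
                       ext_fam    : 8;
      };
      unsigned int ucode_rev;
  };

  int main(void)
  {
      union zen_patch_rev p = { .ucode_rev = 0x0a0011d7 };

      /* 0x0a0011d7: ext_fam 0x0a -> family 0x19 (Zen3), ext_model 0x0,
       * model 0x1, stepping 0x1, patch level 0xd7. Dropping the low
       * byte (>> 8) yields 0xa0011, the switch key used further down. */
      printf("family 0x%x model 0x%02x stepping 0x%x patch 0x%02x key 0x%x\n",
             0xf + p.ext_fam, (p.ext_model << 4) | p.model,
             p.stepping, p.rev, p.ucode_rev >> 8);
      return 0;
  }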
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h

@@ -449,6 +449,7 @@
 /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
 #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* "" No Nested Data Breakpoints */
 #define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_VERW_CLEAR		(20*32+ 5) /* "" The memory form of VERW mitigates TSA */
 #define X86_FEATURE_NULL_SEL_CLR_BASE	(20*32+ 6) /* "" Null Selector Clears Base */
 #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* "" SMM_CTL MSR is not present */
@@ -470,6 +471,10 @@
 #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
 #define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
+#define X86_FEATURE_TSA_SQ_NO		(21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO		(21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM	(21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
 
 /*
  * BUG word(s)
  */
@@ -521,4 +526,5 @@
 #define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #define X86_BUG_ITS			X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
 #define X86_BUG_ITS_NATIVE_ONLY	X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
+#define X86_BUG_TSA			X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
 
 #endif /* _ASM_X86_CPUFEATURES_H */

diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h

@@ -80,7 +80,7 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
 static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
 				     unsigned long ecx)
 {
-	/* No MDS buffer clear as this is AMD/HYGON only */
+	/* No need for TSA buffer clearing on AMD */
 
 	/* "mwaitx %eax, %ebx, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xfb;"

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h

@@ -330,19 +330,25 @@
  * CFLAGS.ZF.
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
-.macro CLEAR_CPU_BUFFERS
+.macro __CLEAR_CPU_BUFFERS feature
 #ifdef CONFIG_X86_64
-	ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
 #else
 	/*
 	 * In 32bit mode, the memory operand must be a %cs reference. The data
 	 * segments may not be usable (vm86 mode), and the stack segment may not
 	 * be flat (ESPFIX32).
 	 */
-	ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
 #endif
 .endm
 
+#define CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
+
+#define VM_CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
+
 #ifdef CONFIG_X86_64
 .macro CLEAR_BRANCH_HISTORY
 	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@@ -627,7 +633,7 @@ static __always_inline void x86_clear_cpu_buffers(void)
 /**
  * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
- * vulnerability
+ * and TSA vulnerabilities.
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */

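The runtime component of the mitigation is just the memory-operand form of VERW aimed at x86_verw_sel. Architecturally, VERW only tests whether the selector is writable and sets EFLAGS.ZF; clearing the CPU buffers is a microcode side effect advertised via X86_FEATURE_VERW_CLEAR. A userspace sketch of the instruction form itself (it demonstrates the encoding and the ZF clobber, not the clearing, which requires the TSA microcode):

  #include <stdio.h>

  int main(void)
  {
      unsigned short sel;

      /* Any usable data selector will do; grab our own %ds. */
      __asm__ volatile("mov %%ds, %0" : "=r"(sel));

      /* Memory-operand VERW, the form the ALTERNATIVE above emits. */
      __asm__ volatile("verw %0" : : "m"(sel) : "cc");

      puts("verw executed");
      return 0;
  }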
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c

@@ -539,6 +539,63 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
+static bool amd_check_tsa_microcode(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	union zen_patch_rev p;
+	u32 min_rev = 0;
+
+	p.ext_fam	= c->x86 - 0xf;
+	p.model		= c->x86_model;
+	p.ext_model	= c->x86_model >> 4;
+	p.stepping	= c->x86_stepping;
+
+	if (cpu_has(c, X86_FEATURE_ZEN3) ||
+	    cpu_has(c, X86_FEATURE_ZEN4)) {
+		switch (p.ucode_rev >> 8) {
+		case 0xa0011:	min_rev = 0x0a0011d7; break;
+		case 0xa0012:	min_rev = 0x0a00123b; break;
+		case 0xa0082:	min_rev = 0x0a00820d; break;
+		case 0xa1011:	min_rev = 0x0a10114c; break;
+		case 0xa1012:	min_rev = 0x0a10124c; break;
+		case 0xa1081:	min_rev = 0x0a108109; break;
+		case 0xa2010:	min_rev = 0x0a20102e; break;
+		case 0xa2012:	min_rev = 0x0a201211; break;
+		case 0xa4041:	min_rev = 0x0a404108; break;
+		case 0xa5000:	min_rev = 0x0a500012; break;
+		case 0xa6012:	min_rev = 0x0a60120a; break;
+		case 0xa7041:	min_rev = 0x0a704108; break;
+		case 0xa7052:	min_rev = 0x0a705208; break;
+		case 0xa7080:	min_rev = 0x0a708008; break;
+		case 0xa70c0:	min_rev = 0x0a70c008; break;
+		case 0xaa002:	min_rev = 0x0aa00216; break;
+		default:
+			pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
+				 __func__, p.ucode_rev, c->microcode);
+			return false;
+		}
+	}
+
+	if (!min_rev)
+		return false;
+
+	return c->microcode >= min_rev;
+}
+
+static void tsa_init(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return;
+
+	if (cpu_has(c, X86_FEATURE_ZEN3) ||
+	    cpu_has(c, X86_FEATURE_ZEN4)) {
+		if (amd_check_tsa_microcode())
+			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
+	} else {
+		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
+		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
+	}
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -645,6 +702,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		break;
 	}
 
+	tsa_init(c);
+
 	return;
 
 warn:

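The microcode table in amd_check_tsa_microcode() above keys on the patch level with its low byte dropped and stores, per key, the first revision that carries the VERW-clearing behaviour. A condensed restatement of that check (hypothetical helper, table truncated to two entries):

  #include <stdbool.h>
  #include <stdio.h>

  static bool tsa_ucode_ok(unsigned int key, unsigned int cur)
  {
      unsigned int min_rev = 0;

      switch (key) {                 /* remaining entries elided */
      case 0xa0011: min_rev = 0x0a0011d7; break;
      case 0xa0012: min_rev = 0x0a00123b; break;
      }
      return min_rev && cur >= min_rev;
  }

  int main(void)
  {
      /* key 0xa0011: 0x0a0011d4 predates the fix, 0x0a0011d7 carries it */
      printf("0x0a0011d4: %s\n", tsa_ucode_ok(0xa0011, 0x0a0011d4) ? "ok" : "needs update");
      printf("0x0a0011d7: %s\n", tsa_ucode_ok(0xa0011, 0x0a0011d7) ? "ok" : "needs update");
      return 0;
  }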
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c

@@ -50,6 +50,7 @@ static void __init l1d_flush_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init gds_select_mitigation(void);
 static void __init its_select_mitigation(void);
+static void __init tsa_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -185,6 +186,7 @@ void __init cpu_select_mitigations(void)
 	srso_select_mitigation();
 	gds_select_mitigation();
 	its_select_mitigation();
+	tsa_select_mitigation();
 }
 
 /*
@@ -2093,6 +2095,94 @@ static void update_mds_branch_idle(void)
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"Transient Scheduler Attacks: " fmt
+
+enum tsa_mitigations {
+	TSA_MITIGATION_NONE,
+	TSA_MITIGATION_UCODE_NEEDED,
+	TSA_MITIGATION_USER_KERNEL,
+	TSA_MITIGATION_VM,
+	TSA_MITIGATION_FULL,
+};
+
+static const char * const tsa_strings[] = {
+	[TSA_MITIGATION_NONE]		= "Vulnerable",
+	[TSA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+	[TSA_MITIGATION_USER_KERNEL]	= "Mitigation: Clear CPU buffers: user/kernel boundary",
+	[TSA_MITIGATION_VM]		= "Mitigation: Clear CPU buffers: VM",
+	[TSA_MITIGATION_FULL]		= "Mitigation: Clear CPU buffers",
+};
+
+static enum tsa_mitigations tsa_mitigation __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
+
+static int __init tsa_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		tsa_mitigation = TSA_MITIGATION_NONE;
+	else if (!strcmp(str, "on"))
+		tsa_mitigation = TSA_MITIGATION_FULL;
+	else if (!strcmp(str, "user"))
+		tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
+	else if (!strcmp(str, "vm"))
+		tsa_mitigation = TSA_MITIGATION_VM;
+	else
+		pr_err("Ignoring unknown tsa=%s option.\n", str);
+
+	return 0;
+}
+early_param("tsa", tsa_parse_cmdline);
+
+static void __init tsa_select_mitigation(void)
+{
+	if (tsa_mitigation == TSA_MITIGATION_NONE)
+		return;
+
+	if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
+		tsa_mitigation = TSA_MITIGATION_NONE;
+		return;
+	}
+
+	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
+		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
+
+	switch (tsa_mitigation) {
+	case TSA_MITIGATION_USER_KERNEL:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		break;
+	case TSA_MITIGATION_VM:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+		break;
+	case TSA_MITIGATION_UCODE_NEEDED:
+		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+			goto out;
+
+		pr_notice("Forcing mitigation on in a VM\n");
+
+		/*
+		 * On the off-chance that microcode has been updated
+		 * on the host, enable the mitigation in the guest just
+		 * in case.
+		 */
+		fallthrough;
+	case TSA_MITIGATION_FULL:
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+		break;
+	default:
+		break;
+	}
+
+out:
+	pr_info("%s\n", tsa_strings[tsa_mitigation]);
+}
+
 void cpu_bugs_smt_update(void)
 {
 	mutex_lock(&spec_ctrl_mutex);
@@ -2146,6 +2236,24 @@ void cpu_bugs_smt_update(void)
 		break;
 	}
 
+	switch (tsa_mitigation) {
+	case TSA_MITIGATION_USER_KERNEL:
+	case TSA_MITIGATION_VM:
+	case TSA_MITIGATION_FULL:
+	case TSA_MITIGATION_UCODE_NEEDED:
+		/*
+		 * TSA-SQ can potentially lead to info leakage between
+		 * SMT threads.
+		 */
+		if (sched_smt_active())
+			static_branch_enable(&cpu_buf_idle_clear);
+		else
+			static_branch_disable(&cpu_buf_idle_clear);
+		break;
+	case TSA_MITIGATION_NONE:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }
@@ -3075,6 +3183,11 @@ static ssize_t gds_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
 }
 
+static ssize_t tsa_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -3136,6 +3249,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_ITS:
 		return its_show_state(buf);
 
+	case X86_BUG_TSA:
+		return tsa_show_state(buf);
+
 	default:
 		break;
 	}
@@ -3220,4 +3336,9 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
 }
+
+ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+}
 #endif

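Taken together, the resulting tsa_mitigation value depends on the tsa= choice, whether X86_BUG_TSA is set, and whether the microcode advertises VERW_CLEAR. A condensed emulation of that selection (hypothetical helper; mitigations=off and the force-on-in-a-VM detail are noted in comments rather than modelled):

  #include <stdbool.h>
  #include <stdio.h>

  enum tsa { NONE, UCODE_NEEDED, USER_KERNEL, VM, FULL };

  static const char * const tsa_str[] = {
      [NONE]         = "Vulnerable",
      [UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
      [USER_KERNEL]  = "Mitigation: Clear CPU buffers: user/kernel boundary",
      [VM]           = "Mitigation: Clear CPU buffers: VM",
      [FULL]         = "Mitigation: Clear CPU buffers",
  };

  static enum tsa tsa_select(enum tsa cmd, bool has_bug, bool verw_clear)
  {
      if (cmd == NONE || !has_bug)
          return NONE;               /* also the mitigations=off path */
      if (!verw_clear)
          return UCODE_NEEDED;       /* a guest still forces the caps on */
      return cmd;
  }

  int main(void)
  {
      printf("%s\n", tsa_str[tsa_select(FULL, true, true)]);
      printf("%s\n", tsa_str[tsa_select(FULL, true, false)]);
      printf("%s\n", tsa_str[tsa_select(USER_KERNEL, true, true)]);
      return 0;
  }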
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c

@@ -1277,6 +1277,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define ITS		BIT(8)
 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
 #define ITS_NATIVE_ONLY	BIT(9)
+/* CPU is affected by Transient Scheduler Attacks */
+#define TSA		BIT(10)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
@@ -1324,7 +1326,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_AMD(0x16, RETBLEED),
 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-	VULNBL_AMD(0x19, SRSO),
+	VULNBL_AMD(0x19, SRSO | TSA),
 	{}
 };
@@ -1529,6 +1531,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
 	}
 
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
+		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
+			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
+			    /* Enable bug on Zen guests to allow for live migration. */
+			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
+				setup_force_cpu_bug(X86_BUG_TSA);
+		}
+	}
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c

@@ -96,18 +96,6 @@ static struct equiv_cpu_table {
 	struct equiv_cpu_entry *entry;
 } equiv_table;
 
-union zen_patch_rev {
-	struct {
-		__u32 rev	 : 8,
-		      stepping	 : 4,
-		      model	 : 4,
-		      __reserved : 4,
-		      ext_model	 : 4,
-		      ext_fam	 : 8;
-	};
-	__u32 ucode_rev;
-};
-
 union cpuid_1_eax {
 	struct {
 		__u32 stepping		: 4,

diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c

@@ -48,6 +48,8 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
+	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },
+	{ X86_FEATURE_TSA_L1_NO,	CPUID_ECX,  2, 0x80000021, 0 },
 	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
 	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
 	{ X86_FEATURE_AMD_LBR_PMC_FREEZE,	CPUID_EAX,  2, 0x80000022, 0 },

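Both synthetic features are plain CPUID bits, so the raw values can be cross-checked from userspace. A sketch using GCC/Clang's cpuid.h (bit positions are those from the hunks above; __get_cpuid() returns 0 when leaf 0x80000021 is absent):

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx;

      if (!__get_cpuid(0x80000021, &eax, &ebx, &ecx, &edx))
          return 1;

      /* ECX bit 1: TSA-SQ not vulnerable; ECX bit 2: TSA-L1 not
       * vulnerable; EAX bit 5: memory-form VERW clears buffers. */
      printf("TSA_SQ_NO:  %u\n", (ecx >> 1) & 1);
      printf("TSA_L1_NO:  %u\n", (ecx >> 2) & 1);
      printf("VERW_CLEAR: %u\n", (eax >> 5) & 1);
      return 0;
  }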
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S

@@ -167,6 +167,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
+	/* Clobbers EFLAGS.ZF */
+	VM_CLEAR_CPU_BUFFERS
+
 	/* Enter guest mode */
 	sti
@@ -334,6 +337,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
 	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
+	/* Clobbers EFLAGS.ZF */
+	VM_CLEAR_CPU_BUFFERS
+
 	/* Enter guest mode */
 	sti

diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c

@@ -567,6 +567,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
 CPU_SHOW_VULN_FALLBACK(gds);
 CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -583,6 +584,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
 static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -600,6 +602,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_gather_data_sampling.attr,
 	&dev_attr_reg_file_data_sampling.attr,
 	&dev_attr_indirect_target_selection.attr,
+	&dev_attr_tsa.attr,
 	NULL
 };

diff --git a/include/linux/cpu.h b/include/linux/cpu.h

@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
 					       struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
 						  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,