x86/its: Add support for ITS-safe return thunk

commit a75bf27fe41abe658c53276a0c486c4bf9adecfc upstream.

RETs in the lower half of a cacheline may be affected by the ITS bug,
specifically when the RSB underflows. Use the ITS-safe return thunk for
such RETs.
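
As a rough illustration only (standalone C, not kernel code; the helper
name is made up), the address test this boils down to on 64-byte
cachelines is:

  #include <stdbool.h>
  #include <stdint.h>

  /* Bit 5 of the address selects which half of a 64-byte cacheline the
   * RET lives in; offsets 0-31 are the lower half that ITS can affect. */
  static bool ret_in_lower_half_of_cacheline(uintptr_t ret_addr)
  {
          return !(ret_addr & 0x20);
  }

This mirrors the check added below in cpu_wants_rethunk_at().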

RETs that are not patched (see the policy sketch after this list):

- The RET in a retpoline sequence does not need to be patched, because the
  sequence itself fills an RSB entry before the RET.
- RETs in the Call Depth Tracking (CDT) thunks __x86_indirect_{call|jump}_thunk
  and call_depth_return_thunk are not patched because CDT by design
  prevents RSB underflow.
- RETs in .init section are not reachable after init.
- RETs that are explicitly marked safe with ANNOTATE_UNRET_SAFE.
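
Putting these rules together, a sketch of the resulting policy
(hypothetical, standalone C with made-up parameter names, not the kernel
implementation):

  #include <stdbool.h>

  /* A RET that survives these checks is a candidate; the lower-half
   * cacheline test shown earlier then decides whether it is rewritten
   * to jump to the ITS-safe return thunk. */
  static bool ret_is_its_patch_candidate(bool in_init_section,
                                         bool annotated_unret_safe,
                                         bool in_retpoline_or_cdt_thunk)
  {
          if (in_init_section)            /* not reachable after init */
                  return false;
          if (annotated_unret_safe)       /* ANNOTATE_UNRET_SAFE */
                  return false;
          if (in_retpoline_or_cdt_thunk)  /* RSB cannot underflow here */
                  return false;

          return true;
  }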

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author:    Pawan Gupta
Date:      2024-06-21 21:17:21 -07:00
Committer: Greg Kroah-Hartman
Commit:    4754e29f43 (parent c5a5d80752)

8 changed files with 57 additions and 7 deletions

arch/x86/include/asm/alternative.h

@@ -130,6 +130,20 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
 }
 #endif

+#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL)
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+	return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
 					 void *locks, void *locks_end,

arch/x86/include/asm/nospec-branch.h

@@ -395,6 +395,12 @@ static inline void srso_return_thunk(void) {}
 static inline void srso_alias_return_thunk(void) {}
 #endif

+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);

arch/x86/kernel/alternative.c

@@ -749,6 +749,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)

 #ifdef CONFIG_RETHUNK

+bool cpu_wants_rethunk(void)
+{
+	return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return false;
+	if (x86_return_thunk != its_return_thunk)
+		return true;
+
+	return !((unsigned long)addr & 0x20);
+}
+
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -765,7 +780,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 	int i = 0;

 	/* Patch the custom return thunks... */
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+	if (cpu_wants_rethunk_at(addr)) {
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
@@ -782,7 +797,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;

-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+	if (cpu_wants_rethunk())
 		static_call_force_reinit();

 	for (s = start; s < end; s++) {

arch/x86/kernel/ftrace.c

@@ -363,7 +363,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 		goto fail;

 	ip = trampoline + size;
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+	if (cpu_wants_rethunk_at(ip))
 		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
 	else
 		memcpy(ip, retq, sizeof(retq));

arch/x86/kernel/static_call.c

@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
 		break;

 	case RET:
-		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		if (cpu_wants_rethunk_at(insn))
 			code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
 		else
 			code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
 	case JCC:
 		if (!func) {
 			func = __static_call_return;
-			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+			if (cpu_wants_rethunk())
 				func = x86_return_thunk;
 		}

arch/x86/kernel/vmlinux.lds.S

@@ -547,4 +547,8 @@ INIT_PER_CPU(irq_stack_backing_store);
 . = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
 #endif

+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
+#endif
+
 #endif /* CONFIG_X86_64 */

arch/x86/lib/retpoline.S

@@ -386,7 +386,18 @@ SYM_CODE_START(__x86_indirect_its_thunk_array)
 	.align 64, 0xcc
 SYM_CODE_END(__x86_indirect_its_thunk_array)

-#endif
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */

 /*
  * This function name is magical and is used by -mfunction-return=thunk-extern

arch/x86/net/bpf_jit_comp.c

@@ -498,7 +498,7 @@ static void emit_return(u8 **pprog, u8 *ip)
 {
 	u8 *prog = *pprog;

-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+	if (cpu_wants_rethunk()) {
 		emit_jump(&prog, x86_return_thunk, ip);
 	} else {
 		EMIT1(0xC3);		/* ret */