Skip to content

Commit f9af88a

Browse files
committed
x86/bugs: Rename MDS machinery to something more generic
It will be used by other x86 mitigations. No functional changes.

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Reviewed-by: Pawan Gupta <[email protected]>
1 parent e04c78d commit f9af88a

File tree

8 files changed

+35
-36
lines changed

8 files changed

+35
-36
lines changed

Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
157157
combination with a microcode update. The microcode clears the affected CPU
158158
buffers when the VERW instruction is executed.
159159

160-
Kernel reuses the MDS function to invoke the buffer clearing:
161-
162-
mds_clear_cpu_buffers()
160+
Kernel does the buffer clearing with x86_clear_cpu_buffers().
163161

164162
On MDS affected CPUs, the kernel already invokes CPU buffer clear on
165163
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

Documentation/arch/x86/mds.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ enters a C-state.
9393

9494
The kernel provides a function to invoke the buffer clearing:
9595

96-
mds_clear_cpu_buffers()
96+
x86_clear_cpu_buffers()
9797

9898
Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
9999
Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
185185
idle clearing would be a window dressing exercise and is therefore not
186186
activated.
187187

188-
The invocation is controlled by the static key mds_idle_clear which is
189-
switched depending on the chosen mitigation mode and the SMT state of
190-
the system.
188+
The invocation is controlled by the static key cpu_buf_idle_clear which is
189+
switched depending on the chosen mitigation mode and the SMT state of the
190+
system.
191191

192192
The buffer clear is only invoked before entering the C-State to prevent
193193
that stale data from the idling CPU from spilling to the Hyper-Thread

arch/x86/entry/entry.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb);
3636

3737
/*
3838
* Define the VERW operand that is disguised as entry code so that
39-
* it can be referenced with KPTI enabled. This ensure VERW can be
39+
* it can be referenced with KPTI enabled. This ensures VERW can be
4040
* used late in exit-to-user path after page tables are switched.
4141
*/
4242
.pushsection .entry.text, "ax"
4343

4444
.align L1_CACHE_BYTES, 0xcc
45-
SYM_CODE_START_NOALIGN(mds_verw_sel)
45+
SYM_CODE_START_NOALIGN(x86_verw_sel)
4646
UNWIND_HINT_UNDEFINED
4747
ANNOTATE_NOENDBR
4848
.word __KERNEL_DS
4949
.align L1_CACHE_BYTES, 0xcc
50-
SYM_CODE_END(mds_verw_sel);
50+
SYM_CODE_END(x86_verw_sel);
5151
/* For KVM */
52-
EXPORT_SYMBOL_GPL(mds_verw_sel);
52+
EXPORT_SYMBOL_GPL(x86_verw_sel);
5353

5454
.popsection
5555

arch/x86/include/asm/irqflags.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
4444

4545
static __always_inline void native_safe_halt(void)
4646
{
47-
mds_idle_clear_cpu_buffers();
47+
x86_idle_clear_cpu_buffers();
4848
asm volatile("sti; hlt": : :"memory");
4949
}
5050

5151
static __always_inline void native_halt(void)
5252
{
53-
mds_idle_clear_cpu_buffers();
53+
x86_idle_clear_cpu_buffers();
5454
asm volatile("hlt": : :"memory");
5555
}
5656

arch/x86/include/asm/mwait.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
4343

4444
static __always_inline void __mwait(u32 eax, u32 ecx)
4545
{
46-
mds_idle_clear_cpu_buffers();
46+
x86_idle_clear_cpu_buffers();
4747

4848
/*
4949
* Use the instruction mnemonic with implicit operands, as the LLVM
@@ -98,7 +98,7 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
9898
*/
9999
static __always_inline void __sti_mwait(u32 eax, u32 ecx)
100100
{
101-
mds_idle_clear_cpu_buffers();
101+
x86_idle_clear_cpu_buffers();
102102

103103
asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
104104
}

arch/x86/include/asm/nospec-branch.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -302,22 +302,22 @@
302302
.endm
303303

304304
/*
305-
* Macro to execute VERW instruction that mitigate transient data sampling
306-
* attacks such as MDS. On affected systems a microcode update overloaded VERW
307-
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
308-
*
305+
* Macro to execute VERW insns that mitigate transient data sampling
306+
* attacks such as MDS or TSA. On affected systems a microcode update
307+
* overloaded VERW insns to also clear the CPU buffers. VERW clobbers
308+
* CFLAGS.ZF.
309309
* Note: Only the memory operand variant of VERW clears the CPU buffers.
310310
*/
311311
.macro CLEAR_CPU_BUFFERS
312312
#ifdef CONFIG_X86_64
313-
ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
313+
ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
314314
#else
315315
/*
316316
* In 32bit mode, the memory operand must be a %cs reference. The data
317317
* segments may not be usable (vm86 mode), and the stack segment may not
318318
* be flat (ESPFIX32).
319319
*/
320-
ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
320+
ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
321321
#endif
322322
.endm
323323

@@ -567,24 +567,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
567567

568568
DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
569569

570-
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
570+
DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
571571

572572
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
573573

574574
DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
575575

576-
extern u16 mds_verw_sel;
576+
extern u16 x86_verw_sel;
577577

578578
#include <asm/segment.h>
579579

580580
/**
581-
* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
581+
* x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
582582
*
583583
* This uses the otherwise unused and obsolete VERW instruction in
584584
* combination with microcode which triggers a CPU buffer flush when the
585585
* instruction is executed.
586586
*/
587-
static __always_inline void mds_clear_cpu_buffers(void)
587+
static __always_inline void x86_clear_cpu_buffers(void)
588588
{
589589
static const u16 ds = __KERNEL_DS;
590590

@@ -601,14 +601,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
601601
}
602602

603603
/**
604-
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
604+
* x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
605+
* vulnerability
605606
*
606607
* Clear CPU buffers if the corresponding static key is enabled
607608
*/
608-
static __always_inline void mds_idle_clear_cpu_buffers(void)
609+
static __always_inline void x86_idle_clear_cpu_buffers(void)
609610
{
610-
if (static_branch_likely(&mds_idle_clear))
611-
mds_clear_cpu_buffers();
611+
if (static_branch_likely(&cpu_buf_idle_clear))
612+
x86_clear_cpu_buffers();
612613
}
613614

614615
#endif /* __ASSEMBLER__ */

arch/x86/kernel/cpu/bugs.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -169,9 +169,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
169169
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
170170
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
171171

172-
/* Control MDS CPU buffer clear before idling (halt, mwait) */
173-
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
174-
EXPORT_SYMBOL_GPL(mds_idle_clear);
172+
/* Control CPU buffer clear before idling (halt, mwait) */
173+
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
174+
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
175175

176176
/*
177177
* Controls whether l1d flush based mitigations are enabled,
@@ -637,7 +637,7 @@ static void __init mmio_apply_mitigation(void)
637637
* is required irrespective of SMT state.
638638
*/
639639
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
640-
static_branch_enable(&mds_idle_clear);
640+
static_branch_enable(&cpu_buf_idle_clear);
641641

642642
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
643643
cpu_smt_disable(false);
@@ -2249,10 +2249,10 @@ static void update_mds_branch_idle(void)
22492249
return;
22502250

22512251
if (sched_smt_active()) {
2252-
static_branch_enable(&mds_idle_clear);
2252+
static_branch_enable(&cpu_buf_idle_clear);
22532253
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
22542254
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2255-
static_branch_disable(&mds_idle_clear);
2255+
static_branch_disable(&cpu_buf_idle_clear);
22562256
}
22572257
}
22582258

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7291,7 +7291,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
72917291
vmx_l1d_flush(vcpu);
72927292
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
72937293
kvm_arch_has_assigned_device(vcpu->kvm))
7294-
mds_clear_cpu_buffers();
7294+
x86_clear_cpu_buffers();
72957295

72967296
vmx_disable_fb_clear(vmx);
72977297

0 commit comments

Comments (0)