Commit 198ff55
x86/mm: Randomize per-cpu entry area
jira VULN-8044
cve CVE-2023-0597
commit-author Peter Zijlstra <[email protected]>
commit 97e3d26
upstream-diff Included `linux/prandom.h' in `arch/x86/mm/cpu_entry_area.c' directly (compilation fails without it)

Seth found that the CPU-entry-area, the piece of per-cpu data that is mapped into the userspace page-tables for kPTI, is not subject to any randomization, irrespective of kASLR settings.

On x86_64 a whole P4D (512 GB) of virtual address space is reserved for this structure, which is plenty large enough to randomize things a little.

As such, use a straightforward randomization scheme that avoids duplicates to spread the existing CPUs over the available space.

[ bp: Fix le build. ]

Reported-by: Seth Jenkins <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
(cherry picked from commit 97e3d26)
Signed-off-by: Marcin Wcisło <[email protected]>
1 parent d468641 commit 198ff55
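For scale, a back-of-envelope estimate of the randomization space the P4D provides. This is a standalone userspace sketch, not kernel code: the 208 KiB per-CPU area size is an assumed placeholder (the kernel uses sizeof(struct cpu_entry_area)); the 512 GB and the reserved readonly-IDT page are from the commit itself.

/* entropy_estimate.c - back-of-envelope model of the randomization space.
 * The 208 KiB area size is an ASSUMPTION for illustration only.
 * Build: cc -o entropy entropy_estimate.c -lm
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
	unsigned long long p4d_size  = 512ULL << 30; /* 512 GB reserved on x86_64 */
	unsigned long long page_size = 4096;         /* readonly IDT page */
	unsigned long long cea_size  = 208 * 1024;   /* ASSUMED per-CPU area size */

	/* Same computation as max_cea in the diff below. */
	unsigned long long max_cea = (p4d_size - page_size) / cea_size;

	printf("slots: %llu (~%.1f bits of entropy per CPU)\n",
	       max_cea, log2((double)max_cea));
	return 0;
}

With these assumed numbers that is on the order of millions of candidate slots per CPU, i.e. roughly 21 bits of placement entropy, which is what "plenty large enough to randomize things a little" cashes out to.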

File tree

4 files changed: +51 additions, -10 deletions

arch/x86/include/asm/cpu_entry_area.h

Lines changed: 0 additions & 4 deletions
@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

arch/x86/include/asm/pgtable_areas.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,12 @@
1111

1212
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
1313

14-
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
14+
#ifdef CONFIG_X86_32
15+
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \
16+
(CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
17+
CPU_ENTRY_AREA_BASE)
18+
#else
19+
#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
20+
#endif
1521

1622
#endif /* _ASM_X86_PGTABLE_AREAS_H */
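The point of this hunk: on x86_64 the map size is now the whole P4D and no longer depends on NR_CPUS, while x86_32 (which gets no randomization) keeps the dense per-CPU layout. A minimal mock that can be compiled either way; every constant here is a placeholder, only the macro structure mirrors the header.

/* map_size_demo.c - mock evaluation of the two CPU_ENTRY_AREA_MAP_SIZE
 * branches. All values are PLACEHOLDERS for illustration.
 * Build: cc map_size_demo.c            (64-bit branch)
 *        cc -DCONFIG_X86_32 map_size_demo.c (32-bit branch)
 */
#include <stdio.h>

#define PAGE_SIZE		4096ULL
#define P4D_SIZE		(512ULL << 30)	 /* x86_64, 4-level paging */
#define NR_CPUS			64ULL		 /* placeholder config value */
#define CPU_ENTRY_AREA_SIZE	(52 * PAGE_SIZE) /* placeholder sizeof() */

#define CPU_ENTRY_AREA_BASE	0xfffffe0000000000ULL /* placeholder base */
#define CPU_ENTRY_AREA_RO_IDT	CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU	(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#ifdef CONFIG_X86_32
/* Dense layout: readonly IDT page + NR_CPUS contiguous areas. */
#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU +		\
				 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
				 CPU_ENTRY_AREA_BASE)
#else
/* Whole P4D: independent of NR_CPUS, leaves room to randomize. */
#define CPU_ENTRY_AREA_MAP_SIZE	P4D_SIZE
#endif

int main(void)
{
	printf("map size: %llu MiB\n", CPU_ENTRY_AREA_MAP_SIZE >> 20);
	return 0;
}

Because CPU_ENTRY_AREA_PER_CPU sits one readonly-IDT page above CPU_ENTRY_AREA_BASE, the 32-bit branch still works out to NR_CPUS areas plus that page, matching what the deleted CPU_ENTRY_AREA_TOTAL_SIZE used to express.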

arch/x86/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 
 	/* CPU entry erea is always used for CPU entry */
 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
-			CPU_ENTRY_AREA_TOTAL_SIZE))
+			CPU_ENTRY_AREA_MAP_SIZE))
 		return true;
 
 	/*
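For context, within_area() is the range-overlap helper defined earlier in the same file; the hunk simply widens the blocked range from the old dense array to the whole (now sparse) map so data breakpoints can never land anywhere in it. A sketch of the helper, reproduced from memory of hw_breakpoint.c rather than from this diff:

/* Sketch: does [addr, end] intersect [base, base + size)? */
#include <stdbool.h>

static inline bool within_area(unsigned long addr, unsigned long end,
			       unsigned long base, unsigned long size)
{
	return end >= base && addr < (base + size);
}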

arch/x86/mm/cpu_entry_area.c

Lines changed: 43 additions & 4 deletions
@@ -5,6 +5,7 @@
 #include <linux/kallsyms.h>
 #include <linux/kcore.h>
 #include <linux/pgtable.h>
+#include <linux/prandom.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/fixmap.h>
@@ -16,16 +17,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;
@@ -211,7 +249,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;
@@ -227,6 +264,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)
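To see the scheme end to end, here is a userspace model of init_cea_offsets(): rejection-sample one slot per CPU and retry on collision. This is a sketch under stated assumptions, not kernel code: rand32() (a statically seeded xorshift) stands in for prandom_u32(), u32_max() mirrors the multiply-shift bounding that prandom_u32_max() uses, NCPUS and MAX_CEA are placeholder values, and the duplicate check is simplified to scan only already-assigned CPUs.

/* cea_model.c - userspace model of the duplicate-avoiding slot assignment.
 * Build: cc -o cea_model cea_model.c
 */
#include <stdint.h>
#include <stdio.h>

#define NCPUS	8		/* placeholder for the possible-CPU count */
#define MAX_CEA	2581110u	/* placeholder slot count (see estimate above) */

static uint32_t offsets[NCPUS];

static uint32_t rng_state = 0x9E3779B9u;

static uint32_t rand32(void)
{
	/* xorshift32: stand-in entropy source for prandom_u32() */
	rng_state ^= rng_state << 13;
	rng_state ^= rng_state >> 17;
	rng_state ^= rng_state << 5;
	return rng_state;
}

static uint32_t u32_max(uint32_t ep_ro)
{
	/* multiply-shift bounding into [0, ep_ro), the same shape as
	 * the kernel's prandom_u32_max() */
	return (uint32_t)(((uint64_t)rand32() * ep_ro) >> 32);
}

int main(void)
{
	for (unsigned int i = 0; i < NCPUS; i++) {
		uint32_t cea;
again:
		cea = u32_max(MAX_CEA);
		/* O(n^2) collision check, mirroring the diff's
		 * "O(sodding terrible)" loop; simplified to compare only
		 * against CPUs assigned so far. */
		for (unsigned int j = 0; j < i; j++) {
			if (offsets[j] == cea)
				goto again;
		}
		offsets[i] = cea;
		printf("cpu %u -> slot %u\n", i, cea);
	}
	return 0;
}

With millions of slots and at most a few thousand CPUs, collisions are rare, so the quadratic retry loop is harmless at boot; get_cpu_entry_area() then turns the per-CPU slot into a virtual address with cea_offset(cpu) * CPU_ENTRY_AREA_SIZE, exactly as the last hunk of the diff shows.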
