Skip to content

Commit e0bddc1

Browse files
kiryl authored and hansendc committed
x86/mm: Reduce untagged_addr() overhead for systems without LAM
Use alternatives to reduce untagged_addr() overhead.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Link: https://lore.kernel.org/all/20230312112612.31869-8-kirill.shutemov%40linux.intel.com
1 parent 74c228d commit e0bddc1

File tree

2 files changed

+37
-12
lines changed

2 files changed

+37
-12
lines changed

arch/x86/include/asm/disabled-features.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,12 @@
7575
# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31))
7676
#endif
7777

78+
#ifdef CONFIG_ADDRESS_MASKING
79+
# define DISABLE_LAM 0
80+
#else
81+
# define DISABLE_LAM (1 << (X86_FEATURE_LAM & 31))
82+
#endif
83+
7884
#ifdef CONFIG_INTEL_IOMMU_SVM
7985
# define DISABLE_ENQCMD 0
8086
#else
@@ -115,7 +121,7 @@
115121
#define DISABLED_MASK10 0
116122
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
117123
DISABLE_CALL_DEPTH_TRACKING)
118-
#define DISABLED_MASK12 0
124+
#define DISABLED_MASK12 (DISABLE_LAM)
119125
#define DISABLED_MASK13 0
120126
#define DISABLED_MASK14 0
121127
#define DISABLED_MASK15 0

arch/x86/include/asm/uaccess.h

Lines changed: 30 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <linux/kasan-checks.h>
1010
#include <linux/mm_types.h>
1111
#include <linux/string.h>
12+
#include <linux/mmap_lock.h>
1213
#include <asm/asm.h>
1314
#include <asm/page.h>
1415
#include <asm/smap.h>
@@ -30,26 +31,44 @@ static inline bool pagefault_disabled(void);
3031
* Magic with the 'sign' allows to untag userspace pointer without any branches
3132
* while leaving kernel addresses intact.
3233
*/
33-
static inline unsigned long __untagged_addr(unsigned long addr,
34-
unsigned long mask)
34+
static inline unsigned long __untagged_addr(unsigned long addr)
3535
{
36-
long sign = addr >> 63;
36+
long sign;
37+
38+
/*
39+
* Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
40+
* in alternative instructions. The relocation gets wrong when gets
41+
* copied to the target place.
42+
*/
43+
asm (ALTERNATIVE("",
44+
"sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
45+
"or %%gs:tlbstate_untag_mask, %[sign]\n\t"
46+
"and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
47+
: [addr] "+r" (addr), [sign] "=r" (sign)
48+
: "m" (tlbstate_untag_mask), "[sign]" (addr));
3749

38-
addr &= mask | sign;
3950
return addr;
4051
}
4152

4253
/*
 * untagged_addr() - strip tag bits from a (potentially tagged) user
 * pointer of the current task, preserving the pointer's original type
 * across the round-trip through unsigned long.
 */
#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
4757

58+
/*
 * Strip the tag bits from a userspace pointer that belongs to another
 * process's mm, using that mm's untag mask rather than the current
 * task's per-CPU one.
 *
 * The caller must hold @mm's mmap lock (asserted below) — presumably so
 * that context.untag_mask cannot change underneath us; confirm against
 * callers.
 */
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	/* 0 for user pointers, -1UL for kernel pointers. */
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	/* Kernel addresses keep every bit; user addresses get the mask. */
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}
68+
4869
/*
 * untagged_addr_remote() - strip tag bits from a user pointer belonging
 * to @mm's address space, preserving the pointer's original type.
 * @mm's mmap lock must be held (asserted inside the helper).
 */
#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})
5473

5574
#else

0 commit comments

Comments
 (0)