
Commit dc16b55

cyrilbur-ibm authored and mpe committed
powerpc: Always restore FPU/VEC/VSX if hardware transactional memory in use
Comment from arch/powerpc/kernel/process.c:967: If userspace is inside a transaction (whether active or suspended) and FP/VMX/VSX instructions have ever been enabled inside that transaction, then we have to keep them enabled and keep the FP/VMX/VSX state loaded for as long as the transaction continues. The reason is that if we didn't, and subsequently got a FP/VMX/VSX unavailable interrupt inside a transaction, we don't know whether it's the same transaction, and thus we don't know which of the checkpointed state and the transactional state to use.

restore_math(), restore_fp() and restore_altivec() currently may not restore the registers. This appears to be no more serious than a performance penalty. If the math registers aren't restored, the userspace thread still runs with the facility disabled, so userspace cannot read invalid values. On the first access it takes a facility unavailable exception, the kernel detects an active transaction, and at that point it aborts the transaction. There is the possibility of a pathological case preventing any progress by transactions; however, transactions are never guaranteed to make progress.

Fixes: 70fe3d9 ("powerpc: Restore FPU/VEC/VSX if previously used")
Signed-off-by: Cyril Bur <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent 0e7736c commit dc16b55
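
The whole fix hinges on one predicate: does the thread's MSR say a transaction is active or suspended? Below is a minimal standalone sketch of that check, using illustrative bit positions for the two transaction-state (TS) bits; the real MSR_TS_T/MSR_TS_S definitions live in arch/powerpc/include/asm/reg.h, and the diff's msr_tm_active() simply wraps the kernel's MSR_TM_ACTIVE() macro:

	#include <stdbool.h>

	/* Illustrative stand-ins for the MSR transaction-state (TS) bits;
	 * consult arch/powerpc/include/asm/reg.h for the real definitions. */
	#define MSR_TS_S	(1UL << 33)	/* transaction suspended */
	#define MSR_TS_T	(1UL << 34)	/* transaction active */
	#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)

	/* True when userspace is inside a transaction, whether active or
	 * suspended; a sketch of the condition MSR_TM_ACTIVE() tests. */
	static bool msr_tm_active(unsigned long msr)
	{
		return (msr & MSR_TS_MASK) != 0;
	}

Either state must keep the math facilities loaded, because a facility unavailable interrupt taken mid-transaction cannot tell checkpointed state from transactional state.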

File tree

1 file changed: +18 -3 lines changed

arch/powerpc/kernel/process.c

Lines changed: 18 additions & 3 deletions
@@ -89,7 +89,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 }
+
+static inline bool msr_tm_active(unsigned long msr)
+{
+	return MSR_TM_ACTIVE(msr);
+}
 #else
+static inline bool msr_tm_active(unsigned long msr) { return false; }
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 

@@ -209,7 +215,7 @@ void enable_kernel_fp(void)
 EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk) {
-	if (tsk->thread.load_fp) {
+	if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
@@ -279,7 +285,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+		(tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
@@ -465,7 +472,8 @@ void restore_math(struct pt_regs *regs)
 {
 	unsigned long msr;
 
-	if (!current->thread.load_fp && !loadvec(current->thread))
+	if (!msr_tm_active(regs->msr) &&
+		!current->thread.load_fp && !loadvec(current->thread))
 		return;
 
 	msr = regs->msr;
@@ -984,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs)
 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 
+	/* Ensure that restore_math() will restore */
+	if (msr_diff & MSR_FP)
+		current->thread.load_fp = 1;
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
+		current->thread.load_vec = 1;
+#endif
 	restore_math(regs);
 
 	regs->msr |= msr_diff;
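
As a self-contained illustration of the msr_diff logic in the last hunk: the facilities that were enabled in the checkpointed MSR but are disabled now are exactly the ones restore_tm_state() must flag so that restore_math() reloads them. A userspace sketch with illustrative MSR bit positions (the real MSR_FP/MSR_VEC/MSR_VSX values are in arch/powerpc/include/asm/reg.h):

	#include <stdio.h>

	/* Illustrative bit positions, not the real reg.h definitions. */
	#define MSR_FP	(1UL << 13)
	#define MSR_VEC	(1UL << 25)
	#define MSR_VSX	(1UL << 23)

	int main(void)
	{
		/* Checkpoint had FP and VEC enabled; current MSR has only FP. */
		unsigned long ckpt_msr = MSR_FP | MSR_VEC;
		unsigned long cur_msr = MSR_FP;

		/* Bits on at the checkpoint but off now, masked to the math
		 * facilities: these must be re-enabled before the transaction
		 * resumes. */
		unsigned long msr_diff = ckpt_msr & ~cur_msr;
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

		printf("restore FP:  %s\n", (msr_diff & MSR_FP) ? "yes" : "no");
		printf("restore VEC: %s\n", (msr_diff & MSR_VEC) ? "yes" : "no");
		return 0;
	}

Running this prints "restore FP: no" and "restore VEC: yes": only VEC was lost between the checkpoint and now, so only load_vec needs forcing.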
