Commit 6bbd42e

apopple-nvidia authored and akpm00 committed
mmu_notifiers: call invalidate_range() when invalidating TLBs
The invalidate_range() notifier is going to become an architecture-specific mmu notifier used to keep the TLB of secondary MMUs such as an IOMMU in sync with the CPU page tables. Currently it is called from code paths separate from the main CPU TLB invalidations. This can lead to a secondary TLB not being invalidated when required, and it makes it hard to reason about when exactly the secondary TLB is invalidated.

To fix this, move the notifier call into the architecture-specific TLB maintenance functions for the architectures that have secondary MMUs requiring explicit software invalidations.

This fixes an SMMU bug on ARM64. On ARM64, PTE permission upgrades require a TLB invalidation, which is performed by the architecture-specific ptep_set_access_flags() calling flush_tlb_page() if required. However, that path does not call the notifier, resulting in infinite faults being generated by devices using the SMMU if it has previously cached a read-only PTE in its TLB. Moving the invalidations into the TLB invalidation functions ensures all invalidations happen at the same time as the CPU invalidation. The architecture-specific flush_tlb_all() routines do not call the notifier, as none of the IOMMUs require this.

Link: https://lkml.kernel.org/r/0287ae32d91393a582897d6c4db6f7456b1001f2.1690292440.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <[email protected]>
Suggested-by: Jason Gunthorpe <[email protected]>
Tested-by: SeongJae Park <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Tested-by: Luis Chamberlain <[email protected]>
Cc: Andrew Donnellan <[email protected]>
Cc: Chaitanya Kumar Borah <[email protected]>
Cc: Frederic Barrat <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kevin Tian <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Nicolin Chen <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Zhi Wang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 57b037d commit 6bbd42e
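For context, the mmu_notifier_invalidate_range() calls added in the diffs below land in the invalidate_range() callback of struct mmu_notifier_ops, which secondary-MMU drivers (for example the ARM SMMUv3 SVA code) implement to flush their device TLBs. What follows is a minimal, hypothetical sketch of such a consumer, assuming the callback name used at the time of this commit; my_dev, my_iotlb_inval() and my_dev_bind() are illustrative names and are not part of this change.

/*
 * Minimal sketch of a secondary-MMU user of the notifier. Hypothetical
 * driver: my_dev, my_iotlb_inval() and my_dev_bind() are illustrative
 * names only.
 */
#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>
#include <linux/container_of.h>

struct my_dev {
	struct mmu_notifier mn;		/* embedded so container_of() works */
};

/* Runs whenever a CPU TLB-flush path patched here notifies a range. */
static void my_iotlb_inval(struct mmu_notifier *mn, struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	/* Tell the device to drop cached translations for [start, end). */
	(void)dev;
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range = my_iotlb_inval,
};

/* Bind a device context to an mm so it receives these invalidations. */
static int my_dev_bind(struct my_dev *dev, struct mm_struct *mm)
{
	dev->mn.ops = &my_mn_ops;
	return mmu_notifier_register(&dev->mn, mm);
}

Because the notifier is now invoked from the CPU TLB maintenance functions themselves, a callback like this sees exactly the ranges the CPU invalidates, including the ARM64 permission-upgrade case described above.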

7 files changed: +15, -1 lines changed

arch/arm64/include/asm/tlbflush.h

Lines changed: 5 additions & 0 deletions
@@ -13,6 +13,7 @@
 #include <linux/bitfield.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
+#include <linux/mmu_notifier.h>
 #include <asm/cputype.h>
 #include <asm/mmu.h>
 
@@ -252,6 +253,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
 	dsb(ish);
+	mmu_notifier_invalidate_range(mm, 0, -1UL);
 }
 
 static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
@@ -263,6 +265,8 @@ static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
 	addr = __TLBI_VADDR(uaddr, ASID(mm));
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
+	mmu_notifier_invalidate_range(mm, uaddr & PAGE_MASK,
+						(uaddr & PAGE_MASK) + PAGE_SIZE);
 }
 
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -396,6 +400,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 		scale++;
 	}
 	dsb(ish);
+	mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,

arch/powerpc/include/asm/book3s/64/tlbflush.h

Lines changed: 1 addition & 0 deletions
@@ -5,6 +5,7 @@
 #define MMU_NO_CONTEXT	~0UL
 
 #include <linux/mm_types.h>
+#include <linux/mmu_notifier.h>
 #include <asm/book3s/64/tlbflush-hash.h>
 #include <asm/book3s/64/tlbflush-radix.h>

arch/powerpc/mm/book3s64/radix_hugetlbpage.c

Lines changed: 1 addition & 0 deletions
@@ -39,6 +39,7 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
 		radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
 	else
 		radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+	mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 }
 
 void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,

arch/powerpc/mm/book3s64/radix_tlb.c

Lines changed: 4 additions & 0 deletions
@@ -987,6 +987,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 	preempt_enable();
+	mmu_notifier_invalidate_range(mm, 0, -1UL);
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
@@ -1020,6 +1021,7 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
 		_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
 	}
 	preempt_enable();
+	mmu_notifier_invalidate_range(mm, 0, -1UL);
 }
 
 void radix__flush_all_mm(struct mm_struct *mm)
@@ -1228,6 +1230,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 	}
 out:
 	preempt_enable();
+	mmu_notifier_invalidate_range(mm, start, end);
 }
 
 void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -1392,6 +1395,7 @@ static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 	}
 out:
 	preempt_enable();
+	mmu_notifier_invalidate_range(mm, start, end);
 }
 
 void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,

arch/x86/include/asm/tlbflush.h

Lines changed: 2 additions & 0 deletions
@@ -3,6 +3,7 @@
 #define _ASM_X86_TLBFLUSH_H
 
 #include <linux/mm_types.h>
+#include <linux/mmu_notifier.h>
 #include <linux/sched.h>
 
 #include <asm/processor.h>
@@ -282,6 +283,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
 {
 	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+	mmu_notifier_invalidate_range(mm, 0, -1UL);
 }
 
 static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)

arch/x86/mm/tlb.c

Lines changed: 2 additions & 0 deletions
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/sched/smt.h>
 #include <linux/task_work.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -1036,6 +1037,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	put_flush_tlb_info();
 	put_cpu();
+	mmu_notifier_invalidate_range(mm, start, end);
 }

include/asm-generic/tlb.h

Lines changed: 0 additions & 1 deletion
@@ -456,7 +456,6 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 		return;
 
 	tlb_flush(tlb);
-	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 	__tlb_reset_range(tlb);
 }
