Skip to content

Commit aec44e0

Browse files
xzpeter authored and torvalds committed
hugetlb: pass vma into huge_pte_alloc() and huge_pmd_share()
Patch series "hugetlb: Disable huge pmd unshare for uffd-wp", v4. This series tries to disable huge pmd unshare of hugetlbfs backed memory for uffd-wp. Although uffd-wp of hugetlbfs is still during rfc stage, the idea of this series may be needed for multiple tasks (Axel's uffd minor fault series, and Mike's soft dirty series), so I picked it out from the larger series. This patch (of 4): It is a preparation work to be able to behave differently in the per architecture huge_pte_alloc() according to different VMA attributes. Pass it deeper into huge_pmd_share() so that we can avoid the find_vma() call. [[email protected]: build fix] Link: https://lkml.kernel.org/r/20210304164653.GB397383@xz-x1 Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Peter Xu <[email protected]> Suggested-by: Mike Kravetz <[email protected]> Cc: Adam Ruprecht <[email protected]> Cc: Alexander Viro <[email protected]> Cc: Alexey Dobriyan <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Anshuman Khandual <[email protected]> Cc: Axel Rasmussen <[email protected]> Cc: Cannon Matthews <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Chinwen Chang <[email protected]> Cc: David Rientjes <[email protected]> Cc: "Dr . David Alan Gilbert" <[email protected]> Cc: Huang Ying <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Jann Horn <[email protected]> Cc: Jerome Glisse <[email protected]> Cc: Kirill A. 
Shutemov <[email protected]> Cc: Lokesh Gidra <[email protected]> Cc: "Matthew Wilcox (Oracle)" <[email protected]> Cc: Michael Ellerman <[email protected]> Cc: "Michal Koutn" <[email protected]> Cc: Michel Lespinasse <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Mina Almasry <[email protected]> Cc: Nicholas Piggin <[email protected]> Cc: Oliver Upton <[email protected]> Cc: Shaohua Li <[email protected]> Cc: Shawn Anastasio <[email protected]> Cc: Steven Price <[email protected]> Cc: Steven Rostedt <[email protected]> Cc: Vlastimil Babka <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 786b311 commit aec44e0

File tree

11 files changed

+24
-20
lines changed

11 files changed

+24
-20
lines changed

arch/arm64/mm/hugetlbpage.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
252252
set_pte(ptep, pte);
253253
}
254254

255-
pte_t *huge_pte_alloc(struct mm_struct *mm,
255+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
256256
unsigned long addr, unsigned long sz)
257257
{
258258
pgd_t *pgdp;
@@ -286,7 +286,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
286286
} else if (sz == PMD_SIZE) {
287287
if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
288288
pud_none(READ_ONCE(*pudp)))
289-
ptep = huge_pmd_share(mm, addr, pudp);
289+
ptep = huge_pmd_share(mm, vma, addr, pudp);
290290
else
291291
ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
292292
} else if (sz == (CONT_PMD_SIZE)) {

arch/ia64/mm/hugetlbpage.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
2525
EXPORT_SYMBOL(hpage_shift);
2626

2727
pte_t *
28-
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
28+
huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
29+
unsigned long addr, unsigned long sz)
2930
{
3031
unsigned long taddr = htlbpage_to_page(addr);
3132
pgd_t *pgd;

arch/mips/mm/hugetlbpage.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,8 @@
2121
#include <asm/tlb.h>
2222
#include <asm/tlbflush.h>
2323

24-
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
25-
unsigned long sz)
24+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
25+
unsigned long addr, unsigned long sz)
2626
{
2727
pgd_t *pgd;
2828
p4d_t *p4d;

arch/parisc/mm/hugetlbpage.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
4444
}
4545

4646

47-
pte_t *huge_pte_alloc(struct mm_struct *mm,
47+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
4848
unsigned long addr, unsigned long sz)
4949
{
5050
pgd_t *pgd;

arch/powerpc/mm/hugetlbpage.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
106106
* At this point we do the placement change only for BOOK3S 64. This would
107107
* possibly work on other subarchs.
108108
*/
109-
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
109+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
110+
unsigned long addr, unsigned long sz)
110111
{
111112
pgd_t *pg;
112113
p4d_t *p4;

arch/s390/mm/hugetlbpage.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
189189
return pte;
190190
}
191191

192-
pte_t *huge_pte_alloc(struct mm_struct *mm,
192+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
193193
unsigned long addr, unsigned long sz)
194194
{
195195
pgd_t *pgdp;

arch/sh/mm/hugetlbpage.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
#include <asm/tlbflush.h>
2222
#include <asm/cacheflush.h>
2323

24-
pte_t *huge_pte_alloc(struct mm_struct *mm,
24+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
2525
unsigned long addr, unsigned long sz)
2626
{
2727
pgd_t *pgd;

arch/sparc/mm/hugetlbpage.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p
279279
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
280280
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
281281

282-
pte_t *huge_pte_alloc(struct mm_struct *mm,
282+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
283283
unsigned long addr, unsigned long sz)
284284
{
285285
pgd_t *pgd;

include/linux/hugetlb.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode);
152152
extern struct mutex *hugetlb_fault_mutex_table;
153153
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
154154

155-
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
155+
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
156+
unsigned long addr, pud_t *pud);
156157

157158
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
158159

@@ -161,7 +162,7 @@ extern struct list_head huge_boot_pages;
161162

162163
/* arch callbacks */
163164

164-
pte_t *huge_pte_alloc(struct mm_struct *mm,
165+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
165166
unsigned long addr, unsigned long sz);
166167
pte_t *huge_pte_offset(struct mm_struct *mm,
167168
unsigned long addr, unsigned long sz);

mm/hugetlb.c

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3795,7 +3795,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
37953795
src_pte = huge_pte_offset(src, addr, sz);
37963796
if (!src_pte)
37973797
continue;
3798-
dst_pte = huge_pte_alloc(dst, addr, sz);
3798+
dst_pte = huge_pte_alloc(dst, vma, addr, sz);
37993799
if (!dst_pte) {
38003800
ret = -ENOMEM;
38013801
break;
@@ -4563,7 +4563,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
45634563
*/
45644564
mapping = vma->vm_file->f_mapping;
45654565
i_mmap_lock_read(mapping);
4566-
ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4566+
ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
45674567
if (!ptep) {
45684568
i_mmap_unlock_read(mapping);
45694569
return VM_FAULT_OOM;
@@ -5370,9 +5370,9 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
53705370
* if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
53715371
* only required for subsequent processing.
53725372
*/
5373-
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5373+
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5374+
unsigned long addr, pud_t *pud)
53745375
{
5375-
struct vm_area_struct *vma = find_vma(mm, addr);
53765376
struct address_space *mapping = vma->vm_file->f_mapping;
53775377
pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
53785378
vma->vm_pgoff;
@@ -5450,7 +5450,8 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
54505450
}
54515451
#define want_pmd_share() (1)
54525452
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5453-
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5453+
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5454+
unsigned long addr, pud_t *pud)
54545455
{
54555456
return NULL;
54565457
}
@@ -5469,7 +5470,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
54695470
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
54705471

54715472
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5472-
pte_t *huge_pte_alloc(struct mm_struct *mm,
5473+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
54735474
unsigned long addr, unsigned long sz)
54745475
{
54755476
pgd_t *pgd;
@@ -5488,7 +5489,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
54885489
} else {
54895490
BUG_ON(sz != PMD_SIZE);
54905491
if (want_pmd_share() && pud_none(*pud))
5491-
pte = huge_pmd_share(mm, addr, pud);
5492+
pte = huge_pmd_share(mm, vma, addr, pud);
54925493
else
54935494
pte = (pte_t *)pmd_alloc(mm, pud, addr);
54945495
}

0 commit comments

Comments
 (0)