Skip to content

Commit 89824bf

Browse files
48capvts-mat
authored and committed
hugetlb: unshare some PMDs when splitting VMAs
jira VULN-71585
cve-pre CVE-2025-38084
commit-author James Houghton <[email protected]>
commit b30c14c
upstream-diff Stable 5.15 backport bd9a23a was used for the actual (clean) cherry-pick

PMD sharing can only be done in PUD_SIZE-aligned pieces of VMAs; however, it is possible that HugeTLB VMAs are split without unsharing the PMDs first. Without this fix, it is possible to hit the uffd-wp-related WARN_ON_ONCE in hugetlb_change_protection [1].

The key there is that hugetlb_unshare_all_pmds will not attempt to unshare PMDs in non-PUD_SIZE-aligned sections of the VMA.

It might seem ideal to unshare in hugetlb_vm_op_open, but we need to unshare in both the new and old VMAs, so unsharing in hugetlb_vm_op_split seems natural.

[1]: https://lore.kernel.org/linux-mm/CADrL8HVeOkj0QH5VZZbRzybNE8CG-tEGFshnA+bG9nMgcWtBSg@mail.gmail.com/

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 6dfeaff ("hugetlb/userfaultfd: unshare all pmds for hugetlbfs when register wp")
Signed-off-by: James Houghton <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Acked-by: Peter Xu <[email protected]>
Cc: Axel Rasmussen <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit b30c14c)
Signed-off-by: Marcin Wcisło <[email protected]>
1 parent 9f30757 commit 89824bf

File tree

1 file changed

+35
-9
lines changed

1 file changed

+35
-9
lines changed

mm/hugetlb.c

Lines changed: 35 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,8 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
9595

9696
/* Forward declaration */
9797
static int hugetlb_acct_memory(struct hstate *h, long delta);
98+
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
99+
unsigned long start, unsigned long end);
98100

99101
static inline bool subpool_is_free(struct hugepage_subpool *spool)
100102
{
@@ -4628,6 +4630,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
46284630
{
46294631
if (addr & ~(huge_page_mask(hstate_vma(vma))))
46304632
return -EINVAL;
4633+
4634+
/*
4635+
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
4636+
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4637+
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4638+
*/
4639+
if (addr & ~PUD_MASK) {
4640+
/*
4641+
* hugetlb_vm_op_split is called right before we attempt to
4642+
* split the VMA. We will need to unshare PMDs in the old and
4643+
* new VMAs, so let's unshare before we split.
4644+
*/
4645+
unsigned long floor = addr & PUD_MASK;
4646+
unsigned long ceil = floor + PUD_SIZE;
4647+
4648+
if (floor >= vma->vm_start && ceil <= vma->vm_end)
4649+
hugetlb_unshare_pmds(vma, floor, ceil);
4650+
}
4651+
46314652
return 0;
46324653
}
46334654

@@ -7036,26 +7057,21 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
70367057
}
70377058
}
70387059

7039-
/*
7040-
* This function will unconditionally remove all the shared pmd pgtable entries
7041-
* within the specific vma for a hugetlbfs memory range.
7042-
*/
7043-
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7060+
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7061+
unsigned long start,
7062+
unsigned long end)
70447063
{
70457064
struct hstate *h = hstate_vma(vma);
70467065
unsigned long sz = huge_page_size(h);
70477066
struct mm_struct *mm = vma->vm_mm;
70487067
struct mmu_notifier_range range;
7049-
unsigned long address, start, end;
7068+
unsigned long address;
70507069
spinlock_t *ptl;
70517070
pte_t *ptep;
70527071

70537072
if (!(vma->vm_flags & VM_MAYSHARE))
70547073
return;
70557074

7056-
start = ALIGN(vma->vm_start, PUD_SIZE);
7057-
end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7058-
70597075
if (start >= end)
70607076
return;
70617077

@@ -7088,6 +7104,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
70887104
mmu_notifier_invalidate_range_end(&range);
70897105
}
70907106

7107+
/*
7108+
* This function will unconditionally remove all the shared pmd pgtable entries
7109+
* within the specific vma for a hugetlbfs memory range.
7110+
*/
7111+
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7112+
{
7113+
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7114+
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7115+
}
7116+
70917117
#ifdef CONFIG_CMA
70927118
static bool cma_reserve_called __initdata;
70937119

0 commit comments

Comments (0)