Commit a5c3b9f

Anshuman Khandual authored and torvalds committed
mm/debug_vm_pgtable: add tests validating advanced arch page table helpers
This adds new tests validating the following arch advanced page table helpers. These tests create and test specific mapping types at various page table levels.

1. pxxp_set_wrprotect()
2. pxxp_get_and_clear()
3. pxxp_set_access_flags()
4. pxxp_get_and_clear_full()
5. pxxp_test_and_clear_young()
6. pxx_leaf()
7. pxx_set_huge()
8. pxx_(clear|mk)_savedwrite()
9. huge_pxxp_xxx()

[[email protected]: drop RANDOM_ORVALUE from hugetlb_advanced_tests()]
Link: http://lkml.kernel.org/r/[email protected]

Suggested-by: Catalin Marinas <[email protected]>
Signed-off-by: Anshuman Khandual <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Tested-by: Vineet Gupta <[email protected]> [arc]
Reviewed-by: Zi Yan <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Steven Price <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
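Each advanced helper is exercised with the same shape of check: install a known entry, invoke the arch helper under test, read the entry back, and WARN_ON() if the expected bit did not change. As a rough, condensed sketch of that pattern (taken from the PTE write-protect case in the diff below; the function name here is purely illustrative, and this is kernel-context code, not a standalone program):

    /*
     * Illustrative only: condensed from pte_advanced_tests() in the diff
     * below. The helper name is hypothetical; the real test covers many
     * more helpers than write protection.
     */
    static void __init pte_wrprotect_check(struct mm_struct *mm, pte_t *ptep,
                                           unsigned long pfn, unsigned long vaddr,
                                           pgprot_t prot)
    {
            pte_t pte = pfn_pte(pfn, prot);         /* build a PTE for a known pfn */

            set_pte_at(mm, vaddr, ptep, pte);       /* install it in the page table */
            ptep_set_wrprotect(mm, vaddr, ptep);    /* arch helper under test */
            pte = ptep_get(ptep);                   /* read the entry back */
            WARN_ON(pte_write(pte));                /* the write bit must now be clear */
    }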
1 parent 0528940 commit a5c3b9f

1 file changed: +312 -0

mm/debug_vm_pgtable.c

Lines changed: 312 additions & 0 deletions
@@ -21,13 +21,15 @@
 #include <linux/module.h>
 #include <linux/pfn_t.h>
 #include <linux/printk.h>
+#include <linux/pgtable.h>
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/start_kernel.h>
 #include <linux/sched/mm.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
 
 #define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)
 
@@ -55,6 +57,55 @@ static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
 	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
 }
 
+static void __init pte_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pte_t *ptep,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+	pte_t pte = pfn_pte(pfn, prot);
+
+	pte = pfn_pte(pfn, prot);
+	set_pte_at(mm, vaddr, ptep, pte);
+	ptep_set_wrprotect(mm, vaddr, ptep);
+	pte = ptep_get(ptep);
+	WARN_ON(pte_write(pte));
+
+	pte = pfn_pte(pfn, prot);
+	set_pte_at(mm, vaddr, ptep, pte);
+	ptep_get_and_clear(mm, vaddr, ptep);
+	pte = ptep_get(ptep);
+	WARN_ON(!pte_none(pte));
+
+	pte = pfn_pte(pfn, prot);
+	pte = pte_wrprotect(pte);
+	pte = pte_mkclean(pte);
+	set_pte_at(mm, vaddr, ptep, pte);
+	pte = pte_mkwrite(pte);
+	pte = pte_mkdirty(pte);
+	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
+	pte = ptep_get(ptep);
+	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
+
+	pte = pfn_pte(pfn, prot);
+	set_pte_at(mm, vaddr, ptep, pte);
+	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
+	pte = ptep_get(ptep);
+	WARN_ON(!pte_none(pte));
+
+	pte = pte_mkyoung(pte);
+	set_pte_at(mm, vaddr, ptep, pte);
+	ptep_test_and_clear_young(vma, vaddr, ptep);
+	pte = ptep_get(ptep);
+	WARN_ON(pte_young(pte));
+}
+
+static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pte = pfn_pte(pfn, prot);
+
+	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
+	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
+}
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
 {
@@ -77,6 +128,90 @@ static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
 	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
 }
 
+static void __init pmd_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pmd_t *pmdp,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+	pmd_t pmd = pfn_pmd(pfn, prot);
+
+	if (!has_transparent_hugepage())
+		return;
+
+	/* Align the address wrt HPAGE_PMD_SIZE */
+	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+
+	pmd = pfn_pmd(pfn, prot);
+	set_pmd_at(mm, vaddr, pmdp, pmd);
+	pmdp_set_wrprotect(mm, vaddr, pmdp);
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(pmd_write(pmd));
+
+	pmd = pfn_pmd(pfn, prot);
+	set_pmd_at(mm, vaddr, pmdp, pmd);
+	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(!pmd_none(pmd));
+
+	pmd = pfn_pmd(pfn, prot);
+	pmd = pmd_wrprotect(pmd);
+	pmd = pmd_mkclean(pmd);
+	set_pmd_at(mm, vaddr, pmdp, pmd);
+	pmd = pmd_mkwrite(pmd);
+	pmd = pmd_mkdirty(pmd);
+	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
+
+	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
+	set_pmd_at(mm, vaddr, pmdp, pmd);
+	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(!pmd_none(pmd));
+
+	pmd = pmd_mkyoung(pmd);
+	set_pmd_at(mm, vaddr, pmdp, pmd);
+	pmdp_test_and_clear_young(vma, vaddr, pmdp);
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(pmd_young(pmd));
+}
+
+static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
+{
+	pmd_t pmd = pfn_pmd(pfn, prot);
+
+	/*
+	 * PMD based THP is a leaf entry.
+	 */
+	pmd = pmd_mkhuge(pmd);
+	WARN_ON(!pmd_leaf(pmd));
+}
+
+static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
+{
+	pmd_t pmd;
+
+	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+		return;
+	/*
+	 * X86 defined pmd_set_huge() verifies that the given
+	 * PMD is not a populated non-leaf entry.
+	 */
+	WRITE_ONCE(*pmdp, __pmd(0));
+	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
+	WARN_ON(!pmd_clear_huge(pmdp));
+	pmd = READ_ONCE(*pmdp);
+	WARN_ON(!pmd_none(pmd));
+}
+
+static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
+{
+	pmd_t pmd = pfn_pmd(pfn, prot);
+
+	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
+	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
+}
+
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
 {
@@ -100,12 +235,119 @@ static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
 	 */
 	WARN_ON(!pud_bad(pud_mkhuge(pud)));
 }
+
+static void __init pud_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pud_t *pudp,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+	pud_t pud = pfn_pud(pfn, prot);
+
+	if (!has_transparent_hugepage())
+		return;
+
+	/* Align the address wrt HPAGE_PUD_SIZE */
+	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+
+	set_pud_at(mm, vaddr, pudp, pud);
+	pudp_set_wrprotect(mm, vaddr, pudp);
+	pud = READ_ONCE(*pudp);
+	WARN_ON(pud_write(pud));
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	pud = pfn_pud(pfn, prot);
+	set_pud_at(mm, vaddr, pudp, pud);
+	pudp_huge_get_and_clear(mm, vaddr, pudp);
+	pud = READ_ONCE(*pudp);
+	WARN_ON(!pud_none(pud));
+
+	pud = pfn_pud(pfn, prot);
+	set_pud_at(mm, vaddr, pudp, pud);
+	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
+	pud = READ_ONCE(*pudp);
+	WARN_ON(!pud_none(pud));
+#endif /* __PAGETABLE_PMD_FOLDED */
+	pud = pfn_pud(pfn, prot);
+	pud = pud_wrprotect(pud);
+	pud = pud_mkclean(pud);
+	set_pud_at(mm, vaddr, pudp, pud);
+	pud = pud_mkwrite(pud);
+	pud = pud_mkdirty(pud);
+	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
+	pud = READ_ONCE(*pudp);
+	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
+
+	pud = pud_mkyoung(pud);
+	set_pud_at(mm, vaddr, pudp, pud);
+	pudp_test_and_clear_young(vma, vaddr, pudp);
+	pud = READ_ONCE(*pudp);
+	WARN_ON(pud_young(pud));
+}
+
+static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
+{
+	pud_t pud = pfn_pud(pfn, prot);
+
+	/*
+	 * PUD based THP is a leaf entry.
+	 */
+	pud = pud_mkhuge(pud);
+	WARN_ON(!pud_leaf(pud));
+}
+
+static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
+{
+	pud_t pud;
+
+	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+		return;
+	/*
+	 * X86 defined pud_set_huge() verifies that the given
+	 * PUD is not a populated non-leaf entry.
+	 */
+	WRITE_ONCE(*pudp, __pud(0));
+	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
+	WARN_ON(!pud_clear_huge(pudp));
+	pud = READ_ONCE(*pudp);
+	WARN_ON(!pud_none(pud));
+}
 #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pud_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pud_t *pudp,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+}
+static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
+{
+}
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
 static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
 static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pmd_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pmd_t *pmdp,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+}
+static void __init pud_advanced_tests(struct mm_struct *mm,
+			struct vm_area_struct *vma, pud_t *pudp,
+			unsigned long pfn, unsigned long vaddr,
+			pgprot_t prot)
+{
+}
+static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
+{
+}
+static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
+{
+}
+static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
@@ -495,8 +737,56 @@ static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
 	WARN_ON(!pte_huge(pte_mkhuge(pte)));
 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 }
+
+static void __init hugetlb_advanced_tests(struct mm_struct *mm,
+					  struct vm_area_struct *vma,
+					  pte_t *ptep, unsigned long pfn,
+					  unsigned long vaddr, pgprot_t prot)
+{
+	struct page *page = pfn_to_page(pfn);
+	pte_t pte = ptep_get(ptep);
+	unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK;
+
+	pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot));
+	set_huge_pte_at(mm, vaddr, ptep, pte);
+	barrier();
+	WARN_ON(!pte_same(pte, huge_ptep_get(ptep)));
+	huge_pte_clear(mm, vaddr, ptep, PMD_SIZE);
+	pte = huge_ptep_get(ptep);
+	WARN_ON(!huge_pte_none(pte));
+
+	pte = mk_huge_pte(page, prot);
+	set_huge_pte_at(mm, vaddr, ptep, pte);
+	barrier();
+	huge_ptep_set_wrprotect(mm, vaddr, ptep);
+	pte = huge_ptep_get(ptep);
+	WARN_ON(huge_pte_write(pte));
+
+	pte = mk_huge_pte(page, prot);
+	set_huge_pte_at(mm, vaddr, ptep, pte);
+	barrier();
+	huge_ptep_get_and_clear(mm, vaddr, ptep);
+	pte = huge_ptep_get(ptep);
+	WARN_ON(!huge_pte_none(pte));
+
+	pte = mk_huge_pte(page, prot);
+	pte = huge_pte_wrprotect(pte);
+	set_huge_pte_at(mm, vaddr, ptep, pte);
+	barrier();
+	pte = huge_pte_mkwrite(pte);
+	pte = huge_pte_mkdirty(pte);
+	huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
+	pte = huge_ptep_get(ptep);
+	WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte)));
+}
 #else /* !CONFIG_HUGETLB_PAGE */
 static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init hugetlb_advanced_tests(struct mm_struct *mm,
+					  struct vm_area_struct *vma,
+					  pte_t *ptep, unsigned long pfn,
+					  unsigned long vaddr, pgprot_t prot)
+{
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -568,6 +858,7 @@ static unsigned long __init get_random_vaddr(void)
 
 static int __init debug_vm_pgtable(void)
 {
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	pgd_t *pgdp;
 	p4d_t *p4dp, *saved_p4dp;
@@ -596,6 +887,12 @@ static int __init debug_vm_pgtable(void)
 	 */
 	protnone = __P000;
 
+	vma = vm_area_alloc(mm);
+	if (!vma) {
+		pr_err("vma allocation failed\n");
+		return 1;
+	}
+
 	/*
 	 * PFN for mapping at PTE level is determined from a standard kernel
 	 * text symbol. But pfns for higher page table levels are derived by
@@ -644,6 +941,20 @@ static int __init debug_vm_pgtable(void)
 	p4d_clear_tests(mm, p4dp);
 	pgd_clear_tests(mm, pgdp);
 
+	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
+	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot);
+	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
+	hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
+
+	pmd_leaf_tests(pmd_aligned, prot);
+	pud_leaf_tests(pud_aligned, prot);
+
+	pmd_huge_tests(pmdp, pmd_aligned, prot);
+	pud_huge_tests(pudp, pud_aligned, prot);
+
+	pte_savedwrite_tests(pte_aligned, prot);
+	pmd_savedwrite_tests(pmd_aligned, prot);
+
 	pte_unmap_unlock(ptep, ptl);
 
 	pmd_populate_tests(mm, pmdp, saved_ptep);
@@ -678,6 +989,7 @@ static int __init debug_vm_pgtable(void)
 	pmd_free(mm, saved_pmdp);
 	pte_free(mm, saved_ptep);
 
+	vm_area_free(vma);
 	mm_dec_nr_puds(mm);
 	mm_dec_nr_pmds(mm);
 	mm_dec_nr_ptes(mm);
