
Commit 6d75132

Merge branch 'mm-hotfixes-stable' into mm-stable
2 parents: 088b8aa + 5929899

23 files changed, 192 insertions(+), 106 deletions(-)


arch/powerpc/mm/book3s64/radix_pgtable.c

Lines changed: 0 additions & 9 deletions

@@ -937,15 +937,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
 	pmd = *pmdp;
 	pmd_clear(pmdp);
 
-	/*
-	 * pmdp collapse_flush need to ensure that there are no parallel gup
-	 * walk after this call. This is needed so that we can have stable
-	 * page ref count when collapsing a page. We don't allow a collapse page
-	 * if we have gup taken on the page. We can ensure that by sending IPI
-	 * because gup walk happens with IRQ disabled.
-	 */
-	serialize_against_pte_lookup(vma->vm_mm);
-
 	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
 
 	return pmd;

arch/x86/lib/usercopy.c

Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	 * called from other contexts.
 	 */
 	pagefault_disable();
-	ret = __copy_from_user_inatomic(to, from, n);
+	ret = raw_copy_from_user(to, from, n);
 	pagefault_enable();
 
 	return ret;

arch/x86/mm/Makefile

Lines changed: 3 additions & 0 deletions

@@ -4,10 +4,12 @@ KCOV_INSTRUMENT_tlb.o := n
 KCOV_INSTRUMENT_mem_encrypt.o := n
 KCOV_INSTRUMENT_mem_encrypt_amd.o := n
 KCOV_INSTRUMENT_mem_encrypt_identity.o := n
+KCOV_INSTRUMENT_pgprot.o := n
 
 KASAN_SANITIZE_mem_encrypt.o := n
 KASAN_SANITIZE_mem_encrypt_amd.o := n
 KASAN_SANITIZE_mem_encrypt_identity.o := n
+KASAN_SANITIZE_pgprot.o := n
 
 # Disable KCSAN entirely, because otherwise we get warnings that some functions
 # reference __initdata sections.
@@ -17,6 +19,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_mem_encrypt.o = -pg
 CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
 CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
+CFLAGS_REMOVE_pgprot.o = -pg
 endif
 
 obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \

fs/ntfs/super.c

Lines changed: 2 additions & 1 deletion

@@ -2092,7 +2092,8 @@ static bool load_system_files(ntfs_volume *vol)
 	// TODO: Initialize security.
 	/* Get the extended system files' directory inode. */
 	vol->extend_ino = ntfs_iget(sb, FILE_Extend);
-	if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+	if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
+			!S_ISDIR(vol->extend_ino->i_mode)) {
 		if (!IS_ERR(vol->extend_ino))
 			iput(vol->extend_ino);
 		ntfs_error(sb, "Failed to load $Extend.");

fs/xfs/xfs_notify_failure.c

Lines changed: 3 additions & 3 deletions

@@ -175,13 +175,13 @@ xfs_dax_notify_failure(
 	u64			ddev_start;
 	u64			ddev_end;
 
-	if (!(mp->m_sb.sb_flags & SB_BORN)) {
+	if (!(mp->m_super->s_flags & SB_BORN)) {
 		xfs_warn(mp, "filesystem is not ready for notify_failure()!");
 		return -EIO;
 	}
 
 	if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
-		xfs_warn(mp,
+		xfs_debug(mp,
			 "notify_failure() not supported on realtime device!");
 		return -EOPNOTSUPP;
 	}
@@ -194,7 +194,7 @@ xfs_dax_notify_failure(
 	}
 
 	if (!xfs_has_rmapbt(mp)) {
-		xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+		xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
 		return -EOPNOTSUPP;
 	}
 

include/linux/memremap.h

Lines changed: 5 additions & 0 deletions

@@ -139,6 +139,11 @@ struct dev_pagemap {
 	};
 };
 
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+	return pgmap->ops && pgmap->ops->memory_failure;
+}
+
 static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
 {
 	if (pgmap->flags & PGMAP_ALTMAP_VALID)
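
The helper gives callers a single place to check that a ZONE_DEVICE pagemap actually provides a ->memory_failure() handler before invoking it. A minimal sketch of how a caller might use it; the function below is hypothetical and only illustrates the intended check (the ops signature assumed here is memory_failure(pgmap, pfn, nr_pages, mf_flags)):

/* Hypothetical caller, illustration only. */
static int notify_pgmap_failure(struct dev_pagemap *pgmap, unsigned long pfn,
				int mf_flags)
{
	/* Bail out if the owning driver did not register a handler. */
	if (!pgmap_has_memory_failure(pgmap))
		return -EOPNOTSUPP;

	/* Let the driver handle the poisoned pfn. */
	return pgmap->ops->memory_failure(pgmap, pfn, 1, mf_flags);
}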

mm/damon/dbgfs.c

Lines changed: 14 additions & 5 deletions

@@ -884,6 +884,7 @@ static int dbgfs_rm_context(char *name)
 	struct dentry *root, *dir, **new_dirs;
 	struct damon_ctx **new_ctxs;
 	int i, j;
+	int ret = 0;
 
 	if (damon_nr_running_ctxs())
 		return -EBUSY;
@@ -898,14 +899,16 @@ static int dbgfs_rm_context(char *name)
 
 	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
-	if (!new_dirs)
-		return -ENOMEM;
+	if (!new_dirs) {
+		ret = -ENOMEM;
+		goto out_dput;
+	}
 
 	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
 	if (!new_ctxs) {
-		kfree(new_dirs);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_new_dirs;
 	}
 
 	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
@@ -925,7 +928,13 @@ static int dbgfs_rm_context(char *name)
 	dbgfs_ctxs = new_ctxs;
 	dbgfs_nr_ctxs--;
 
-	return 0;
+	goto out_dput;
+
+out_new_dirs:
+	kfree(new_dirs);
+out_dput:
+	dput(dir);
+	return ret;
 }
 
 static ssize_t dbgfs_rm_context_write(struct file *file,
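
The reworked exit paths follow the common goto-unwind idiom: one ret variable, labels ordered so each releases what was acquired before the failure point, and every return funneled through them so the dentry looked up earlier in the function is always dput(). A small, self-contained sketch of the same idiom with hypothetical resources (not the DAMON code itself):

#include <stdio.h>
#include <stdlib.h>

/* Acquire two resources; later failures jump to labels that release
 * everything acquired so far, in reverse order. */
static int do_work(void)
{
	int ret = 0;
	char *a, *b;

	a = malloc(32);
	if (!a)
		return -1;		/* nothing acquired yet */

	b = malloc(64);
	if (!b) {
		ret = -1;
		goto out_free_a;	/* undo only what we hold */
	}

	/* ... use a and b ... */

	free(b);
out_free_a:
	free(a);
	return ret;
}

int main(void)
{
	printf("do_work() = %d\n", do_work());
	return 0;
}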

mm/frontswap.c

Lines changed: 3 additions & 0 deletions

@@ -125,6 +125,9 @@ void frontswap_init(unsigned type, unsigned long *map)
 	 * p->frontswap set to something valid to work properly.
 	 */
 	frontswap_map_set(sis, map);
+
+	if (!frontswap_enabled())
+		return;
 	frontswap_ops->init(type);
 }
 

mm/gup.c

Lines changed: 28 additions & 6 deletions

@@ -2394,8 +2394,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
 }
 
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-			 unsigned int flags, struct page **pages, int *nr)
+/*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+ * operations.
+ *
+ * To pin the page, fast-gup needs to do below in order:
+ * (1) pin the page (by prefetching pte), then (2) check pte not changed.
+ *
+ * For the rest of pgtable operations where pgtable updates can be racy
+ * with fast-gup, we need to do (1) clear pte, then (2) check whether page
+ * is pinned.
+ *
+ * Above will work for all pte-level operations, including THP split.
+ *
+ * For THP collapse, it's a bit more complicated because fast-gup may be
+ * walking a pgtable page that is being freed (pte is still valid but pmd
+ * can be cleared already). To avoid race in such condition, we need to
+ * also check pmd here to make sure pmd doesn't change (corresponds to
+ * pmdp_collapse_flush() in the THP collapse code path).
+ */
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+			 unsigned long end, unsigned int flags,
+			 struct page **pages, int *nr)
 {
 	struct dev_pagemap *pgmap = NULL;
 	int nr_start = *nr, ret = 0;
@@ -2441,7 +2461,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 			goto pte_unmap;
 		}
 
-		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
+		    unlikely(pte_val(pte) != pte_val(*ptep))) {
 			gup_put_folio(folio, 1, flags);
 			goto pte_unmap;
 		}
@@ -2488,8 +2509,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
  * useful to have gup_huge_pmd even if we can't operate on ptes.
  */
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-			 unsigned int flags, struct page **pages, int *nr)
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+			 unsigned long end, unsigned int flags,
+			 struct page **pages, int *nr)
 {
 	return 0;
 }
@@ -2813,7 +2835,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, flags, pages, nr))
				return 0;
-		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+		} else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
			return 0;
 	} while (pmdp++, addr = next, addr != end);
 
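
Condensed, the ordering that the new comment describes looks roughly like the following; this is a simplified sketch of the lockless walk, not the full gup_pte_range() body:

	pte_t pte = ptep_get_lockless(ptep);	/* (1) read the pte */

	folio = try_grab_folio(page, 1, flags);	/* (2) pin the page */
	if (!folio)
		goto pte_unmap;

	/*
	 * (3) re-check that neither the pmd nor the pte changed while the
	 * pin was taken; a change means a zap, split or collapse raced
	 * with us, so drop the pin and bail out of the fast path.
	 */
	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
	    unlikely(pte_val(pte) != pte_val(*ptep))) {
		gup_put_folio(folio, 1, flags);
		goto pte_unmap;
	}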

mm/huge_memory.c

Lines changed: 2 additions & 4 deletions

@@ -2900,11 +2900,9 @@ static void split_huge_pages_all(void)
 		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
 			int nr_pages;
-			if (!pfn_valid(pfn))
-				continue;
 
-			page = pfn_to_page(pfn);
-			if (!get_page_unless_zero(page))
+			page = pfn_to_online_page(pfn);
+			if (!page || !get_page_unless_zero(page))
 				continue;
 
 			if (zone != page_zone(page))
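
pfn_to_online_page() folds the pfn_valid() test together with a check that the containing memory section is actually online, so holes, offlined ranges and device-memory pfns come back as NULL instead of being handed to get_page_unless_zero(). The resulting loop shape, roughly (sketch only, variable names are illustrative):

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)		/* hole, offline or device memory */
			continue;
		if (!get_page_unless_zero(page))	/* free page, skip it */
			continue;

		/* ... operate on the page ... */

		put_page(page);
	}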
