
Commit b9a8a41

Author: Matthew Wilcox (Oracle)
truncate,shmem: Handle truncates that split large folios

Handle folio splitting in the parts of the truncation functions which
already handle partial pages.  Factor all that code out into a new
function called truncate_inode_partial_folio().

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
1 parent f6357c3 commit b9a8a41
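
As a rough illustration (not part of the commit), the arithmetic the new helper applies to a folio overlapping an inclusive byte range [start, end] can be traced in a standalone userspace C sketch; partial_range() and its sample values are hypothetical, and only the two comparisons mirror the kernel code in the mm/truncate.c hunk below.

#include <stdio.h>
#include <stdint.h>

/*
 * Mirror of truncate_inode_partial_folio()'s offset/length computation:
 * the folio covers bytes [pos, pos + size) and the truncation range is
 * [start, end] with both ends inclusive, as in the kernel code.
 */
static void partial_range(uint64_t pos, uint64_t size,
                          uint64_t start, uint64_t end)
{
        uint64_t offset = pos < start ? start - pos : 0;
        uint64_t length = size;

        if (pos + length <= end)        /* folio ends inside the range */
                length -= offset;
        else                            /* range ends inside the folio */
                length = end + 1 - pos - offset;

        printf("folio @%llu size %llu: affected bytes [%llu, %llu)\n",
               (unsigned long long)pos, (unsigned long long)size,
               (unsigned long long)offset,
               (unsigned long long)(offset + length));
}

int main(void)
{
        /* Truncate to 5000 bytes: range is [5000, ~0], so a 16KiB folio
         * at offset 0 keeps [0, 5000) and zeroes/discards [5000, 16384). */
        partial_range(0, 16384, 5000, UINT64_MAX);

        /* Hole punch [6000, 9999] entirely inside the same folio. */
        partial_range(0, 16384, 6000, 9999);
        return 0;
}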

File tree: 3 files changed, +122 -105 lines changed


mm/internal.h

Lines changed: 2 additions & 0 deletions

@@ -98,6 +98,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
+		loff_t end);
 
 /**
  * folio_evictable - Test whether a folio is evictable.

mm/shmem.c

Lines changed: 45 additions & 63 deletions

@@ -880,30 +880,26 @@ void shmem_unlock_mapping(struct address_space *mapping)
 	}
 }
 
-/*
- * Check whether a hole-punch or truncation needs to split a huge page,
- * returning true if no split was required, or the split has been successful.
- *
- * Eviction (or truncation to 0 size) should never need to split a huge page;
- * but in rare cases might do so, if shmem_undo_range() failed to trylock on
- * head, and then succeeded to trylock on tail.
- *
- * A split can only succeed when there are no additional references on the
- * huge page: so the split below relies upon find_get_entries() having stopped
- * when it found a subpage of the huge page, without getting further references.
- */
-static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
+static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
 {
-	if (!PageTransCompound(page))
-		return true;
-
-	/* Just proceed to delete a huge page wholly within the range punched */
-	if (PageHead(page) &&
-	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
-		return true;
+	struct folio *folio;
+	struct page *page;
 
-	/* Try to split huge page, so we can truly punch the hole or truncate */
-	return split_huge_page(page) >= 0;
+	/*
+	 * At first avoid shmem_getpage(,,,SGP_READ): that fails
+	 * beyond i_size, and reports fallocated pages as holes.
+	 */
+	folio = __filemap_get_folio(inode->i_mapping, index,
+					FGP_ENTRY | FGP_LOCK, 0);
+	if (!xa_is_value(folio))
+		return folio;
+	/*
+	 * But read a page back from swap if any of it is within i_size
+	 * (although in some cases this is just a waste of time).
+	 */
+	page = NULL;
+	shmem_getpage(inode, index, &page, SGP_READ);
+	return page ? page_folio(page) : NULL;
 }
 
 /*
@@ -917,10 +913,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
-	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
-	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
 	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
+	struct folio *folio;
+	bool same_folio;
 	long nr_swaps_freed = 0;
 	pgoff_t index;
 	int i;
@@ -936,7 +932,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end && find_lock_entries(mapping, index, end - 1,
 			&fbatch, indices)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
-			struct folio *folio = fbatch.folios[i];
+			folio = fbatch.folios[i];
 
 			index = indices[i];
 
@@ -959,33 +955,30 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		index++;
 	}
 
-	if (partial_start) {
-		struct page *page = NULL;
-		shmem_getpage(inode, start - 1, &page, SGP_READ);
-		if (page) {
-			unsigned int top = PAGE_SIZE;
-			if (start > end) {
-				top = partial_end;
-				partial_end = 0;
-			}
-			zero_user_segment(page, partial_start, top);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
+	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
+	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
+	if (folio) {
+		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+			start = folio->index + folio_nr_pages(folio);
+			if (same_folio)
+				end = folio->index;
 		}
+		folio_unlock(folio);
+		folio_put(folio);
+		folio = NULL;
 	}
-	if (partial_end) {
-		struct page *page = NULL;
-		shmem_getpage(inode, end, &page, SGP_READ);
-		if (page) {
-			zero_user_segment(page, 0, partial_end);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
-		}
+
+	if (!same_folio)
+		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
+	if (folio) {
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend))
+			end = folio->index;
+		folio_unlock(folio);
+		folio_put(folio);
 	}
-	if (start >= end)
-		return;
 
 	index = start;
 	while (index < end) {
@@ -1001,7 +994,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			continue;
 		}
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
-			struct folio *folio = fbatch.folios[i];
+			folio = fbatch.folios[i];
 
 			index = indices[i];
 			if (xa_is_value(folio)) {
@@ -1019,8 +1012,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			folio_lock(folio);
 
 			if (!unfalloc || !folio_test_uptodate(folio)) {
-				struct page *page = folio_file_page(folio,
-								index);
 				if (folio_mapping(folio) != mapping) {
 					/* Page was replaced by swap: retry */
 					folio_unlock(folio);
@@ -1029,18 +1020,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 						folio);
-				if (shmem_punch_compound(page, start, end))
-					truncate_inode_folio(mapping, folio);
-				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-					/* Wipe the page and don't get stuck */
-					clear_highpage(page);
-					flush_dcache_page(page);
-					folio_mark_dirty(folio);
-					if (index <
-					    round_up(start, HPAGE_PMD_NR))
-						start = index + 1;
-				}
+				truncate_inode_folio(mapping, folio);
 			}
+			index = folio->index + folio_nr_pages(folio) - 1;
 			folio_unlock(folio);
 		}
 		folio_batch_remove_exceptionals(&fbatch);
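
One subtlety worth tracing (a hypothetical sketch, not kernel code): same_folio starts as a page-granularity guess and is refined once the first folio is locked, because a single large folio can cover both partial ends of a hole punch; and when truncate_inode_partial_folio() reports a failed split, shmem_undo_range() narrows [start, end) so the batch loop never tries to discard the zeroed-but-unsplit folio. The struct, helpers, and split_ok flag below are stand-ins for the real kernel objects.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for a folio covering pages [index, index + nr). */
struct folio { uint64_t index, nr; };

static uint64_t folio_pos(const struct folio *f)  { return f->index << PAGE_SHIFT; }
static uint64_t folio_size(const struct folio *f) { return f->nr << PAGE_SHIFT; }

/*
 * Trace how shmem_undo_range() picks the fully-truncated page range
 * [start, end) once the first partial folio is known; split_ok plays
 * the role of truncate_inode_partial_folio()'s return value.
 */
static void pick_range(uint64_t lstart, uint64_t lend,
                       const struct folio *first, bool split_ok)
{
        uint64_t start = (lstart + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
        uint64_t end = (lend + 1) >> PAGE_SHIFT;
        bool same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);

        if (first) {
                /* One large folio may hold both partial ends. */
                same_folio = lend < folio_pos(first) + folio_size(first);
                if (!split_ok) {
                        /* Unsplit folio was zeroed in place: skip it. */
                        start = first->index + first->nr;
                        if (same_folio)
                                end = first->index;
                }
        }
        printf("discard whole pages [%llu, %llu), same_folio=%d\n",
               (unsigned long long)start, (unsigned long long)end,
               (int)same_folio);
}

int main(void)
{
        /* Punch bytes [6000, 60000] from a 16-page (64KiB) folio at 0. */
        struct folio f = { .index = 0, .nr = 16 };

        pick_range(6000, 60000, &f, true);      /* split succeeded */
        pick_range(6000, 60000, &f, false);     /* split failed */
        return 0;
}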

mm/truncate.c

Lines changed: 75 additions & 42 deletions

@@ -228,6 +228,58 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 	return 0;
 }
 
+/*
+ * Handle partial folios.  The folio may be entirely within the
+ * range if a split has raced with us.  If not, we zero the part of the
+ * folio that's within the [start, end] range, and then split the folio if
+ * it's large.  split_page_range() will discard pages which now lie beyond
+ * i_size, and we rely on the caller to discard pages which lie within a
+ * newly created hole.
+ *
+ * Returns false if splitting failed so the caller can avoid
+ * discarding the entire folio which is stubbornly unsplit.
+ */
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+{
+	loff_t pos = folio_pos(folio);
+	unsigned int offset, length;
+
+	if (pos < start)
+		offset = start - pos;
+	else
+		offset = 0;
+	length = folio_size(folio);
+	if (pos + length <= (u64)end)
+		length = length - offset;
+	else
+		length = end + 1 - pos - offset;
+
+	folio_wait_writeback(folio);
+	if (length == folio_size(folio)) {
+		truncate_inode_folio(folio->mapping, folio);
+		return true;
+	}
+
+	/*
+	 * We may be zeroing pages we're about to discard, but it avoids
+	 * doing a complex calculation here, and then doing the zeroing
+	 * anyway if the page split fails.
+	 */
+	folio_zero_range(folio, offset, length);
+
+	cleancache_invalidate_page(folio->mapping, &folio->page);
+	if (folio_has_private(folio))
+		do_invalidatepage(&folio->page, offset, length);
+	if (!folio_test_large(folio))
+		return true;
+	if (split_huge_page(&folio->page) == 0)
+		return true;
+	if (folio_test_dirty(folio))
+		return false;
+	truncate_inode_folio(folio->mapping, folio);
+	return true;
+}
+
 /*
  * Used to get rid of pages on hardware memory corruption.
  */
@@ -294,20 +346,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
 {
 	pgoff_t start; /* inclusive */
 	pgoff_t end; /* exclusive */
-	unsigned int partial_start; /* inclusive */
-	unsigned int partial_end; /* exclusive */
 	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
 	pgoff_t index;
 	int i;
+	struct folio *folio;
+	bool same_folio;
 
 	if (mapping_empty(mapping))
 		goto out;
 
-	/* Offsets within partial pages */
-	partial_start = lstart & (PAGE_SIZE - 1);
-	partial_end = (lend + 1) & (PAGE_SIZE - 1);
-
 	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
@@ -340,47 +388,32 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		cond_resched();
 	}
 
-	if (partial_start) {
-		struct page *page = find_lock_page(mapping, start - 1);
-		if (page) {
-			unsigned int top = PAGE_SIZE;
-			if (start > end) {
-				/* Truncation within a single page */
-				top = partial_end;
-				partial_end = 0;
-			}
-			wait_on_page_writeback(page);
-			zero_user_segment(page, partial_start, top);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, partial_start,
-						top - partial_start);
-			unlock_page(page);
-			put_page(page);
+	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
+	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
+	if (folio) {
+		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+			start = folio->index + folio_nr_pages(folio);
+			if (same_folio)
+				end = folio->index;
 		}
+		folio_unlock(folio);
+		folio_put(folio);
+		folio = NULL;
 	}
-	if (partial_end) {
-		struct page *page = find_lock_page(mapping, end);
-		if (page) {
-			wait_on_page_writeback(page);
-			zero_user_segment(page, 0, partial_end);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, 0,
-						partial_end);
-			unlock_page(page);
-			put_page(page);
-		}
+
+	if (!same_folio)
+		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
+						FGP_LOCK, 0);
+	if (folio) {
+		if (!truncate_inode_partial_folio(folio, lstart, lend))
+			end = folio->index;
+		folio_unlock(folio);
+		folio_put(folio);
 	}
-	/*
-	 * If the truncation happened within a single page no pages
-	 * will be released, just zeroed, so we can bail out now.
-	 */
-	if (start >= end)
-		goto out;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
 		if (!find_get_entries(mapping, index, end - 1, &fbatch,
 				indices)) {