@@ -880,30 +880,26 @@ void shmem_unlock_mapping(struct address_space *mapping)
 	}
 }
 
-/*
- * Check whether a hole-punch or truncation needs to split a huge page,
- * returning true if no split was required, or the split has been successful.
- *
- * Eviction (or truncation to 0 size) should never need to split a huge page;
- * but in rare cases might do so, if shmem_undo_range() failed to trylock on
- * head, and then succeeded to trylock on tail.
- *
- * A split can only succeed when there are no additional references on the
- * huge page: so the split below relies upon find_get_entries() having stopped
- * when it found a subpage of the huge page, without getting further references.
- */
-static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
+static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
 {
-	if (!PageTransCompound(page))
-		return true;
-
-	/* Just proceed to delete a huge page wholly within the range punched */
-	if (PageHead(page) &&
-	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
-		return true;
+	struct folio *folio;
+	struct page *page;
 
-	/* Try to split huge page, so we can truly punch the hole or truncate */
-	return split_huge_page(page) >= 0;
+	/*
+	 * At first avoid shmem_getpage(,,,SGP_READ): that fails
+	 * beyond i_size, and reports fallocated pages as holes.
+	 */
+	folio = __filemap_get_folio(inode->i_mapping, index,
+					FGP_ENTRY | FGP_LOCK, 0);
+	if (!xa_is_value(folio))
+		return folio;
+	/*
+	 * But read a page back from swap if any of it is within i_size
+	 * (although in some cases this is just a waste of time).
+	 */
+	page = NULL;
+	shmem_getpage(inode, index, &page, SGP_READ);
+	return page ? page_folio(page) : NULL;
 }
 
 /*
@@ -917,10 +913,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
-	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
-	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
 	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
+	struct folio *folio;
+	bool same_folio;
 	long nr_swaps_freed = 0;
 	pgoff_t index;
 	int i;
@@ -936,7 +932,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end && find_lock_entries(mapping, index, end - 1,
 			&fbatch, indices)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
-			struct folio *folio = fbatch.folios[i];
+			folio = fbatch.folios[i];
 
 			index = indices[i];
 
@@ -959,33 +955,30 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		index++;
 	}
 
-	if (partial_start) {
-		struct page *page = NULL;
-		shmem_getpage(inode, start - 1, &page, SGP_READ);
-		if (page) {
-			unsigned int top = PAGE_SIZE;
-			if (start > end) {
-				top = partial_end;
-				partial_end = 0;
-			}
-			zero_user_segment(page, partial_start, top);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
+	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
+	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
+	if (folio) {
+		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+			start = folio->index + folio_nr_pages(folio);
+			if (same_folio)
+				end = folio->index;
 		}
+		folio_unlock(folio);
+		folio_put(folio);
+		folio = NULL;
 	}
-	if (partial_end) {
-		struct page *page = NULL;
-		shmem_getpage(inode, end, &page, SGP_READ);
-		if (page) {
-			zero_user_segment(page, 0, partial_end);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
-		}
+
+	if (!same_folio)
+		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
+	if (folio) {
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend))
+			end = folio->index;
+		folio_unlock(folio);
+		folio_put(folio);
 	}
-	if (start >= end)
-		return;
 
 	index = start;
 	while (index < end) {
@@ -1001,7 +994,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			continue;
 		}
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
-			struct folio *folio = fbatch.folios[i];
+			folio = fbatch.folios[i];
 
 			index = indices[i];
 			if (xa_is_value(folio)) {
@@ -1019,8 +1012,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			folio_lock(folio);
 
 			if (!unfalloc || !folio_test_uptodate(folio)) {
-				struct page *page = folio_file_page(folio,
-									index);
 				if (folio_mapping(folio) != mapping) {
 					/* Page was replaced by swap: retry */
 					folio_unlock(folio);
@@ -1029,18 +1020,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 						folio);
-				if (shmem_punch_compound(page, start, end))
-					truncate_inode_folio(mapping, folio);
-				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-					/* Wipe the page and don't get stuck */
-					clear_highpage(page);
-					flush_dcache_page(page);
-					folio_mark_dirty(folio);
-					if (index <
-					    round_up(start, HPAGE_PMD_NR))
-						start = index + 1;
-				}
+				truncate_inode_folio(mapping, folio);
 			}
+			index = folio->index + folio_nr_pages(folio) - 1;
 			folio_unlock(folio);
 		}
 		folio_batch_remove_exceptionals(&fbatch);
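For context on the path these hunks rewrite: shmem_undo_range() services truncation and hole punching on tmpfs, and the partial-folio handling added above covers holes whose edges fall inside a page (or a large folio). Below is a minimal userspace sketch, not part of the patch, that exercises such a misaligned punch on a shmem-backed file; it uses only the standard memfd_create() and fallocate() interfaces, and the offsets are arbitrary values chosen so both ends of the hole land mid-page.

#define _GNU_SOURCE           /* for memfd_create() and the FALLOC_FL_* flags */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", 0);      /* tmpfs/shmem-backed file */
	char buf[4096];
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}

	memset(buf, 'x', sizeof(buf));
	for (int i = 0; i < 16; i++)           /* populate 64KiB of data */
		write(fd, buf, sizeof(buf));

	/*
	 * Punch a hole whose start and end are not page-aligned: the kernel
	 * must zero the partial page (or partial folio) at each edge rather
	 * than dropping whole pages, which is the case handled by the
	 * shmem_get_partial_folio()/truncate_inode_partial_folio() code above.
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1000, 30000) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}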