@@ -135,7 +135,6 @@ static void page_cache_delete(struct address_space *mapping,
 	}
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
-	VM_BUG_ON_FOLIO(nr != 1 && shadow, folio);
 
 	xas_store(&xas, shadow);
 	xas_init_marks(&xas);
@@ -286,7 +285,7 @@ static void page_cache_delete_batch(struct address_space *mapping,
 			     struct folio_batch *fbatch)
 {
 	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
-	int total_pages = 0;
+	long total_pages = 0;
 	int i = 0;
 	struct folio *folio;
 
@@ -313,18 +312,12 @@ static void page_cache_delete_batch(struct address_space *mapping,
 
 		WARN_ON_ONCE(!folio_test_locked(folio));
 
-		if (folio->index == xas.xa_index)
-			folio->mapping = NULL;
+		folio->mapping = NULL;
 		/* Leave folio->index set: truncation lookup relies on it */
 
-		/*
-		 * Move to the next folio in the batch if this is a regular
-		 * folio or the index is of the last sub-page of this folio.
-		 */
-		if (folio->index + folio_nr_pages(folio) - 1 == xas.xa_index)
-			i++;
+		i++;
 		xas_store(&xas, NULL);
-		total_pages++;
+		total_pages += folio_nr_pages(folio);
 	}
 	mapping->nrpages -= total_pages;
 }
@@ -2089,24 +2082,27 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
-		goto next;
+		continue;
 unlock:
 		folio_unlock(folio);
 put:
 		folio_put(folio);
-next:
-		if (!xa_is_value(folio) && folio_test_large(folio)) {
-			xas_set(&xas, folio->index + folio_nr_pages(folio));
-			/* Did we wrap on 32-bit? */
-			if (!xas.xa_index)
-				break;
-		}
 	}
 	rcu_read_unlock();
 
 	return folio_batch_count(fbatch);
 }
 
+static inline
+bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
+{
+	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
+		return false;
+	if (index >= max)
+		return false;
+	return index < folio->index + folio_nr_pages(folio) - 1;
+}
+
 /**
  * find_get_pages_range - gang pagecache lookup
  * @mapping:	The address_space to search
@@ -2145,11 +2141,17 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 		if (xa_is_value(folio))
 			continue;
 
+again:
 		pages[ret] = folio_file_page(folio, xas.xa_index);
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
+		if (folio_more_pages(folio, xas.xa_index, end)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 	}
 
 	/*
@@ -2207,9 +2209,15 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 		if (unlikely(folio != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = &folio->page;
+again:
+		pages[ret] = folio_file_page(folio, xas.xa_index);
 		if (++ret == nr_pages)
 			break;
+		if (folio_more_pages(folio, xas.xa_index, ULONG_MAX)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 		continue;
put_page:
 		folio_put(folio);
@@ -2334,8 +2342,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			break;
 		if (folio_test_readahead(folio))
 			break;
-		xas.xa_index = folio->index + folio_nr_pages(folio) - 1;
-		xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
+		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
 		continue;
put_folio:
 		folio_put(folio);
@@ -3284,6 +3291,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
+again:
 		page = folio_file_page(folio, xas.xa_index);
 		if (PageHWPoison(page))
 			goto unlock;
@@ -3305,9 +3313,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		do_set_pte(vmf, page, addr);
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, addr, vmf->pte);
+		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
+			xas.xa_index++;
+			folio_ref_inc(folio);
+			goto again;
+		}
 		folio_unlock(folio);
 		continue;
unlock:
+		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
+			xas.xa_index++;
+			goto again;
+		}
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
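
For orientation, the sub-page walk that the new folio_more_pages() helper drives in the lookup and fault-around loops above can be modelled outside the kernel. The sketch below is a minimal userspace approximation, not part of the patch: the struct folio here is invented with only the index and nr_pages fields the stepping logic needs, and it deliberately ignores reference counting and the XArray. It only illustrates how the "again:" loops advance one sub-page at a time until the helper reports no more pages before the end index.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct folio: only the fields
 * the index-stepping logic needs. */
struct folio {
	unsigned long index;	/* pagecache index of the first sub-page */
	unsigned long nr_pages;	/* number of sub-pages in the folio */
};

/* Userspace model of folio_more_pages(): true while further sub-pages
 * of this folio remain at or before 'max'. */
static bool folio_more_pages(const struct folio *folio,
			     unsigned long index, unsigned long max)
{
	if (folio->nr_pages == 1)	/* not a large folio */
		return false;
	if (index >= max)
		return false;
	return index < folio->index + folio->nr_pages - 1;
}

int main(void)
{
	/* One order-2 folio at index 8: sub-pages 8, 9, 10, 11. */
	struct folio f = { .index = 8, .nr_pages = 4 };
	unsigned long index = f.index, end = 10;

	/* Mirrors the "again:" loops in the patch: handle the current
	 * sub-page, then advance while more sub-pages remain up to end. */
	for (;;) {
		printf("sub-page at index %lu\n", index);
		if (!folio_more_pages(&f, index, end))
			break;
		index++;
	}
	return 0;
}

With end = 10, the loop visits indices 8, 9 and 10 and then stops, which is the same cut-off behaviour the end/end_pgoff/ULONG_MAX arguments give the kernel callers.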