@@ -1027,11 +1027,53 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }
 
-static int __unmap_and_move(struct folio *src, struct folio *dst,
+/*
+ * To record some information during migration, we use some unused
+ * fields (mapping and private) of struct folio of the newly allocated
+ * destination folio. This is safe because nobody is using them
+ * except us.
+ */
+static void __migrate_folio_record(struct folio *dst,
+				   unsigned long page_was_mapped,
+				   struct anon_vma *anon_vma)
+{
+	dst->mapping = (void *)anon_vma;
+	dst->private = (void *)page_was_mapped;
+}
+
+static void __migrate_folio_extract(struct folio *dst,
+				    int *page_was_mappedp,
+				    struct anon_vma **anon_vmap)
+{
+	*anon_vmap = (void *)dst->mapping;
+	*page_was_mappedp = (unsigned long)dst->private;
+	dst->mapping = NULL;
+	dst->private = NULL;
+}
+
+/* Cleanup src folio upon migration success */
+static void migrate_folio_done(struct folio *src,
+			       enum migrate_reason reason)
+{
+	/*
+	 * Compaction can migrate also non-LRU pages which are
+	 * not accounted to NR_ISOLATED_*. They can be recognized
+	 * as __PageMovable
+	 */
+	if (likely(!__folio_test_movable(src)))
+		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
+				    folio_is_file_lru(src), -folio_nr_pages(src));
+
+	if (reason != MR_MEMORY_FAILURE)
+		/* We release the page in page_handle_poison. */
+		folio_put(src);
+}
+
+static int __migrate_folio_unmap(struct folio *src, struct folio *dst,
 				 int force, enum migrate_mode mode)
 {
 	int rc = -EAGAIN;
-	bool page_was_mapped = false;
+	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
@@ -1107,8 +1149,8 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 		goto out_unlock;
 
 	if (unlikely(!is_lru)) {
-		rc = move_to_new_folio(dst, src, mode);
-		goto out_unlock_both;
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return MIGRATEPAGE_UNMAP;
 	}
@@ -1133,11 +1175,42 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
 			       !folio_test_ksm(src) && !anon_vma, src);
 		try_to_migrate(src, 0);
-		page_was_mapped = true;
+		page_was_mapped = 1;
 	}
 
-	if (!folio_mapped(src))
-		rc = move_to_new_folio(dst, src, mode);
+	if (!folio_mapped(src)) {
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return MIGRATEPAGE_UNMAP;
+	}
+
+	if (page_was_mapped)
+		remove_migration_ptes(src, src, false);
+
+out_unlock_both:
+	folio_unlock(dst);
+out_unlock:
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma)
+		put_anon_vma(anon_vma);
+	folio_unlock(src);
+out:
+
+	return rc;
+}
+
+static int __migrate_folio_move(struct folio *src, struct folio *dst,
+				enum migrate_mode mode)
+{
+	int rc;
+	int page_was_mapped = 0;
+	struct anon_vma *anon_vma = NULL;
+	bool is_lru = !__PageMovable(&src->page);
+
+	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+
+	rc = move_to_new_folio(dst, src, mode);
+	if (unlikely(!is_lru))
+		goto out_unlock_both;
 
 	/*
 	 * When successful, push dst to LRU immediately: so that if it
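
Note that both MIGRATEPAGE_UNMAP returns above deliberately skip the unlock/cleanup labels, so the move phase inherits everything the unmap phase acquired. The state at the phase boundary, as implied by the hunks (a summary, not text from the patch):

	/*
	 * On MIGRATEPAGE_UNMAP: src and dst remain locked, the anon_vma
	 * reference (if any) is still held, and both values are stashed
	 * in dst via __migrate_folio_record().
	 * On any other return: the error path in __migrate_folio_unmap()
	 * has already unlocked the folios and dropped the reference.
	 */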
@@ -1160,12 +1233,10 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 
 out_unlock_both:
 	folio_unlock(dst);
-out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 	folio_unlock(src);
-out:
 	/*
 	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
@@ -1177,19 +1248,15 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 	return rc;
 }
 
-/*
- * Obtain the lock on folio, remove all ptes and migrate the folio
- * to the newly allocated folio in dst.
- */
-static int unmap_and_move(new_page_t get_new_page,
-			  free_page_t put_new_page,
-			  unsigned long private, struct folio *src,
-			  int force, enum migrate_mode mode,
-			  enum migrate_reason reason,
-			  struct list_head *ret)
+/* Obtain the lock on page, remove all ptes. */
+static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
+			       unsigned long private, struct folio *src,
+			       struct folio **dstp, int force,
+			       enum migrate_mode mode, enum migrate_reason reason,
+			       struct list_head *ret)
 {
 	struct folio *dst;
-	int rc = MIGRATEPAGE_SUCCESS;
+	int rc = MIGRATEPAGE_UNMAP;
 	struct page *newpage = NULL;
 
 	if (!thp_migration_supported() && folio_test_transhuge(src))
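
The new dstp out-parameter is what lets the destination folio survive between the two phases: migrate_folio_unmap() allocates dst and hands it back, and the caller passes it on to migrate_folio_move(). Condensed from the call-site change later in this patch:

	struct folio *dst = NULL;

	rc = migrate_folio_unmap(get_new_page, put_new_page, private,
				 src, &dst, force, mode, reason, ret);
	if (rc == MIGRATEPAGE_UNMAP)
		rc = migrate_folio_move(put_new_page, private,
					src, dst, mode, reason, ret);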
@@ -1200,20 +1267,49 @@ static int unmap_and_move(new_page_t get_new_page,
 		folio_clear_active(src);
 		folio_clear_unevictable(src);
 		/* free_pages_prepare() will clear PG_isolated. */
-		goto out;
+		list_del(&src->lru);
+		migrate_folio_done(src, reason);
+		return MIGRATEPAGE_SUCCESS;
 	}
 
 	newpage = get_new_page(&src->page, private);
 	if (!newpage)
 		return -ENOMEM;
 	dst = page_folio(newpage);
+	*dstp = dst;
 
 	dst->private = NULL;
-	rc = __unmap_and_move(src, dst, force, mode);
+	rc = __migrate_folio_unmap(src, dst, force, mode);
+	if (rc == MIGRATEPAGE_UNMAP)
+		return rc;
+
+	/*
+	 * A folio that has not been unmapped will be restored to
+	 * right list unless we want to retry.
+	 */
+	if (rc != -EAGAIN)
+		list_move_tail(&src->lru, ret);
+
+	if (put_new_page)
+		put_new_page(&dst->page, private);
+	else
+		folio_put(dst);
+
+	return rc;
+}
+
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
+			      struct folio *src, struct folio *dst,
+			      enum migrate_mode mode, enum migrate_reason reason,
+			      struct list_head *ret)
+{
+	int rc;
+
+	rc = __migrate_folio_move(src, dst, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(&dst->page, reason);
 
-out:
 	if (rc != -EAGAIN) {
 		/*
 		 * A folio that has been migrated has all references
@@ -1229,20 +1325,7 @@ static int unmap_and_move(new_page_t get_new_page,
 	 * we want to retry.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		/*
-		 * Compaction can migrate also non-LRU folios which are
-		 * not accounted to NR_ISOLATED_*. They can be recognized
-		 * as __folio_test_movable
-		 */
-		if (likely(!__folio_test_movable(src)))
-			mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
-					folio_is_file_lru(src), -folio_nr_pages(src));
-
-		if (reason != MR_MEMORY_FAILURE)
-			/*
-			 * We release the folio in page_handle_poison.
-			 */
-			folio_put(src);
+		migrate_folio_done(src, reason);
 	} else {
 		if (rc != -EAGAIN)
 			list_add_tail(&src->lru, ret);
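
With both wrappers in place, the return-value contract of the split works out as follows (a summary drawn from the hunks above, not text from the patch):

	/*
	 * migrate_folio_unmap():
	 *   MIGRATEPAGE_SUCCESS - src had no remaining references and was
	 *                         freed directly; no move phase is needed.
	 *   MIGRATEPAGE_UNMAP   - src is unmapped and still locked; the
	 *                         caller must finish with migrate_folio_move().
	 *   negative errno      - failure; dst, if it was allocated, has
	 *                         already been released.
	 * migrate_folio_move():
	 *   MIGRATEPAGE_SUCCESS or a negative errno, as before the split.
	 */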
@@ -1534,7 +1617,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 	int pass = 0;
 	bool is_large = false;
 	bool is_thp = false;
-	struct folio *folio, *folio2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, nr_pages;
 	LIST_HEAD(split_folios);
 	bool nosplit = (reason == MR_NUMA_MISPLACED);
@@ -1561,9 +1644,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 
 			cond_resched();
 
-			rc = unmap_and_move(get_new_page, put_new_page,
-					    private, folio, pass > 2, mode,
-					    reason, ret_folios);
+			rc = migrate_folio_unmap(get_new_page, put_new_page, private,
+						 folio, &dst, pass > 2, mode,
+						 reason, ret_folios);
+			if (rc == MIGRATEPAGE_UNMAP)
+				rc = migrate_folio_move(put_new_page, private,
+							folio, dst, mode,
+							reason, ret_folios);
 			/*
 			 * The rules are:
 			 *	Success: folio will be freed