
Commit 042a308

Hugh Dickins authored and torvalds committed
mm/khugepaged: minor reorderings in collapse_shmem()
Several cleanups in collapse_shmem(): most of which probably do not
really matter, beyond doing things in a more familiar and reassuring
order.  Simplify the failure gotos in the main loop, and on success
update stats while interrupts still disabled from the last iteration.

Link: http://lkml.kernel.org/r/[email protected]
Fixes: f3f0e1d ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: <[email protected]> [4.8+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: 2af8ff2
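The gist of the reordering, before the diff: every failure inside the main
loop now branches to a single exit label (xa_locked) that is reached with
the xarray lock still held and interrupts still disabled, so the success
path can bump NR_SHMEM_THPS, NR_FILE_PAGES and NR_SHMEM under the lock it
already holds from the last iteration, rather than re-disabling interrupts
afterwards.  Below is a minimal, self-contained sketch of that error-path
pattern; it is not kernel code, and the names are invented for illustration
(a pthread mutex stands in for the irq-disabled xas lock; do_step() and
nr_done are placeholders).

/*
 * Sketch: all failures jump to a label reached with the lock held,
 * and the success path updates its counter under that same lock.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long nr_done;			/* stat, only updated under lock */

static int do_step(int i)
{
	return i != 3;			/* pretend step 3 fails */
}

int collapse_like(int n)
{
	int result = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < n; i++) {
		if (!do_step(i)) {
			result = -1;
			goto out_locked;	/* fail with lock still held */
		}
	}
	nr_done += n;			/* success: stats under the held lock */
out_locked:
	pthread_mutex_unlock(&lock);
	/* cleanup that must run unlocked goes here (cf. xa_unlocked) */
	return result;
}

In the real function, the out_unlock label inside the loop additionally
drops the per-page references before jumping on, and xa_unlocked is the
common point for work that must happen after the lock is released.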


mm/khugepaged.c

Lines changed: 32 additions & 40 deletions
@@ -1329,10 +1329,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		goto out;
 	}
 
+	__SetPageLocked(new_page);
+	__SetPageSwapBacked(new_page);
 	new_page->index = start;
 	new_page->mapping = mapping;
-	__SetPageSwapBacked(new_page);
-	__SetPageLocked(new_page);
 	BUG_ON(!page_ref_freeze(new_page, 1));
 
 	/*
@@ -1366,13 +1366,13 @@ static void collapse_shmem(struct mm_struct *mm,
 			if (index == start) {
 				if (!xas_next_entry(&xas, end - 1)) {
 					result = SCAN_TRUNCATED;
-					break;
+					goto xa_locked;
 				}
 				xas_set(&xas, index);
 			}
 			if (!shmem_charge(mapping->host, 1)) {
 				result = SCAN_FAIL;
-				break;
+				goto xa_locked;
 			}
 			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 			nr_none++;
@@ -1387,13 +1387,12 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_unlocked;
 			}
-			xas_lock_irq(&xas);
-			xas_set(&xas, index);
 		} else if (trylock_page(page)) {
 			get_page(page);
+			xas_unlock_irq(&xas);
 		} else {
 			result = SCAN_PAGE_LOCK;
-			break;
+			goto xa_locked;
 		}
 
 		/*
@@ -1408,11 +1407,10 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_TRUNCATED;
 			goto out_unlock;
 		}
-		xas_unlock_irq(&xas);
 
 		if (isolate_lru_page(page)) {
 			result = SCAN_DEL_PAGE_LRU;
-			goto out_isolate_failed;
+			goto out_unlock;
 		}
 
 		if (page_mapped(page))
@@ -1432,7 +1430,9 @@ static void collapse_shmem(struct mm_struct *mm,
 		 */
 		if (!page_ref_freeze(page, 3)) {
 			result = SCAN_PAGE_COUNT;
-			goto out_lru;
+			xas_unlock_irq(&xas);
+			putback_lru_page(page);
+			goto out_unlock;
 		}
 
 		/*
@@ -1444,24 +1444,26 @@ static void collapse_shmem(struct mm_struct *mm,
 		/* Finally, replace with the new page. */
 		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 		continue;
-out_lru:
-		xas_unlock_irq(&xas);
-		putback_lru_page(page);
-out_isolate_failed:
-		unlock_page(page);
-		put_page(page);
-		goto xa_unlocked;
 out_unlock:
 		unlock_page(page);
 		put_page(page);
-		break;
+		goto xa_unlocked;
 	}
-	xas_unlock_irq(&xas);
 
+	__inc_node_page_state(new_page, NR_SHMEM_THPS);
+	if (nr_none) {
+		struct zone *zone = page_zone(new_page);
+
+		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+	}
+
+xa_locked:
+	xas_unlock_irq(&xas);
 xa_unlocked:
+
 	if (result == SCAN_SUCCEED) {
 		struct page *page, *tmp;
-		struct zone *zone = page_zone(new_page);
 
 		/*
 		 * Replacing old pages with new one has succeeded, now we
@@ -1476,11 +1478,11 @@ static void collapse_shmem(struct mm_struct *mm,
 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
 					page);
 			list_del(&page->lru);
-			unlock_page(page);
-			page_ref_unfreeze(page, 1);
 			page->mapping = NULL;
+			page_ref_unfreeze(page, 1);
 			ClearPageActive(page);
 			ClearPageUnevictable(page);
+			unlock_page(page);
 			put_page(page);
 			index++;
 		}
@@ -1489,28 +1491,17 @@ static void collapse_shmem(struct mm_struct *mm,
 			index++;
 		}
 
-		local_irq_disable();
-		__inc_node_page_state(new_page, NR_SHMEM_THPS);
-		if (nr_none) {
-			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
-		}
-		local_irq_enable();
-
-		/*
-		 * Remove pte page tables, so we can re-fault
-		 * the page as huge.
-		 */
-		retract_page_tables(mapping, start);
-
 		/* Everything is ready, let's unfreeze the new_page */
-		set_page_dirty(new_page);
 		SetPageUptodate(new_page);
 		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+		set_page_dirty(new_page);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_anon(new_page);
-		unlock_page(new_page);
 
+		/*
+		 * Remove pte page tables, so we can re-fault the page as huge.
+		 */
+		retract_page_tables(mapping, start);
 		*hpage = NULL;
 
 		khugepaged_pages_collapsed++;
@@ -1543,8 +1534,8 @@ static void collapse_shmem(struct mm_struct *mm,
 			xas_store(&xas, page);
 			xas_pause(&xas);
 			xas_unlock_irq(&xas);
-			putback_lru_page(page);
 			unlock_page(page);
+			putback_lru_page(page);
 			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
@@ -1553,9 +1544,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		/* Unfreeze new_page, caller would take care about freeing it */
 		page_ref_unfreeze(new_page, 1);
 		mem_cgroup_cancel_charge(new_page, memcg, true);
-		unlock_page(new_page);
 		new_page->mapping = NULL;
 	}
+
+	unlock_page(new_page);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	/* TODO: tracepoints */
