
Commit 1da2f32

rikvanriel authored and torvalds committed
mm,thp,compaction,cma: allow THP migration for CMA allocations
The code to implement THP migrations already exists, and the code for CMA to clear out a region of memory already exists. Only a few small tweaks are needed to allow CMA to move THP memory when attempting an allocation from alloc_contig_range.

With these changes, migrating THPs from a CMA area works when allocating a 1GB hugepage from CMA memory.

[[email protected]: fix hugetlbfs pages per Mike, cleanup per Vlastimil]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent b06eda0 · commit 1da2f32
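
For context: the cc->alloc_contig test used in the hunks below relies on the parent commit b06eda0, which adds an alloc_contig flag to struct compact_control and sets it when compaction runs on behalf of alloc_contig_range(). A much-simplified sketch of how that flag is set (illustrative only, not the verbatim kernel source; most fields are omitted):

        /*
         * Simplified sketch, not verbatim kernel code: alloc_contig_range()
         * builds a compact_control describing the pfn range it wants to
         * clear and marks it with alloc_contig = true, so the migration
         * scanner can tell a CMA-style request apart from ordinary
         * compaction and isolate whole THPs instead of skipping them.
         */
        struct compact_control cc = {
                .order            = -1,         /* clearing a range, not one block */
                .zone             = page_zone(pfn_to_page(start)),
                .mode             = MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .alloc_contig     = true,       /* added by the parent commit */
                /* ... other fields omitted from this sketch ... */
        };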

File tree

2 files changed: +20, -11 lines

mm/compaction.c

Lines changed: 13 additions & 9 deletions
@@ -894,12 +894,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                 /*
                  * Regardless of being on LRU, compound pages such as THP and
-                 * hugetlbfs are not to be compacted. We can potentially save
-                 * a lot of iterations if we skip them at once. The check is
-                 * racy, but we can consider only valid values and the only
-                 * danger is skipping too much.
+                 * hugetlbfs are not to be compacted unless we are attempting
+                 * an allocation much larger than the huge page size (eg CMA).
+                 * We can potentially save a lot of iterations if we skip them
+                 * at once. The check is racy, but we can consider only valid
+                 * values and the only danger is skipping too much.
                  */
-                if (PageCompound(page)) {
+                if (PageCompound(page) && !cc->alloc_contig) {
                         const unsigned int order = compound_order(page);
 
                         if (likely(order < MAX_ORDER))
@@ -969,7 +970,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                          * and it's on LRU. It can only be a THP so the order
                          * is safe to read and it's 0 for tail pages.
                          */
-                        if (unlikely(PageCompound(page))) {
+                        if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
                                 low_pfn += compound_nr(page) - 1;
                                 goto isolate_fail;
                         }
@@ -981,12 +982,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 if (__isolate_lru_page(page, isolate_mode) != 0)
                         goto isolate_fail;
 
-                VM_BUG_ON_PAGE(PageCompound(page), page);
+                /* The whole page is taken off the LRU; skip the tail pages. */
+                if (PageCompound(page))
+                        low_pfn += compound_nr(page) - 1;
 
                 /* Successfully isolated */
                 del_page_from_lru_list(page, lruvec, page_lru(page));
-                inc_node_page_state(page,
-                                NR_ISOLATED_ANON + page_is_file_cache(page));
+                mod_node_page_state(page_pgdat(page),
+                                NR_ISOLATED_ANON + page_is_file_cache(page),
+                                hpage_nr_pages(page));
 
 isolate_success:
                 list_add(&page->lru, &cc->migratepages);
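
Both PageCompound() checks above now encode the same rule: a compound page (a THP or a hugetlbfs page) is skipped by the migration scanner only when compaction is not working on behalf of alloc_contig_range(). A hypothetical helper restating that condition (purely illustrative, not part of this commit):

        /*
         * Hypothetical helper, not from this commit: restates the condition
         * used in both hunks above.  Ordinary compaction still skips huge
         * pages wholesale; when cc->alloc_contig is set the request comes
         * from alloc_contig_range() (e.g. CMA allocating a 1GB hugepage),
         * so THPs are isolated and migrated out of the target range instead.
         */
        static inline bool skip_compound_page(struct compact_control *cc,
                                              struct page *page)
        {
                return PageCompound(page) && !cc->alloc_contig;
        }

The accounting change at the end of the last hunk follows from the same decision: once a whole THP is taken off the LRU, the NR_ISOLATED_ANON / NR_ISOLATED_FILE counter is increased by hpage_nr_pages(page) (512 for a 2MB THP with 4KB base pages) rather than by 1, so the isolation counters keep reflecting the real number of base pages pulled off the LRU.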

mm/page_alloc.c

Lines changed: 7 additions & 2 deletions
@@ -8251,15 +8251,20 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 
                 /*
                  * Hugepages are not in LRU lists, but they're movable.
+                 * THPs are on the LRU, but need to be counted as #small pages.
                  * We need not scan over tail pages because we don't
                  * handle each tail page individually in migration.
                  */
-                if (PageHuge(page)) {
+                if (PageHuge(page) || PageTransCompound(page)) {
                         struct page *head = compound_head(page);
                         unsigned int skip_pages;
 
-                        if (!hugepage_migration_supported(page_hstate(head)))
+                        if (PageHuge(page)) {
+                                if (!hugepage_migration_supported(page_hstate(head)))
+                                        return page;
+                        } else if (!PageLRU(head) && !__PageMovable(head)) {
                                 return page;
+                        }
 
                         skip_pages = compound_nr(head) - (page - head);
                         iter += skip_pages - 1;
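
The has_unmovable_pages() change applies the same idea to the check that decides whether a range contains unmovable pages before it is isolated for alloc_contig_range(): a compound page no longer automatically disqualifies the range. Read without the diff markers, the new branch behaves as follows (a readability paraphrase of the hunk above, not additional code from the commit):

        /*
         * Paraphrase of the new branch, for readability.  A hugetlbfs page
         * counts as movable when its hstate supports migration; a THP counts
         * as movable when its head page is on the LRU or is __PageMovable.
         * Either way the tail pages are skipped, since migration handles the
         * compound page as a single unit.
         */
        if (PageHuge(page) || PageTransCompound(page)) {
                struct page *head = compound_head(page);
                unsigned int skip_pages;

                if (PageHuge(page)) {
                        if (!hugepage_migration_supported(page_hstate(head)))
                                return page;    /* unmovable hugetlbfs page */
                } else if (!PageLRU(head) && !__PageMovable(head)) {
                        return page;            /* THP that migration cannot move */
                }

                skip_pages = compound_nr(head) - (page - head);
                iter += skip_pages - 1;
        }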
