Skip to content

Commit d882c00

Browse files
davidhildenbrand authored and torvalds committed
mm: pass migratetype into memmap_init_zone() and move_pfn_range_to_zone()
On the memory onlining path, we want to start with MIGRATE_ISOLATE, to un-isolate the pages after memory onlining is complete. Let's allow passing in the migratetype. Signed-off-by: David Hildenbrand <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Wei Yang <[email protected]> Cc: Baoquan He <[email protected]> Cc: Pankaj Gupta <[email protected]> Cc: Tony Luck <[email protected]> Cc: Fenghua Yu <[email protected]> Cc: Logan Gunthorpe <[email protected]> Cc: Dan Williams <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: "Matthew Wilcox (Oracle)" <[email protected]> Cc: Michel Lespinasse <[email protected]> Cc: Charan Teja Reddy <[email protected]> Cc: Mel Gorman <[email protected]> Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4eb29bd commit d882c00

File tree

6 files changed

+27
-17
lines changed

6 files changed

+27
-17
lines changed

arch/ia64/mm/init.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -537,7 +537,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
537537
if (map_start < map_end)
538538
memmap_init_zone((unsigned long)(map_end - map_start),
539539
args->nid, args->zone, page_to_pfn(map_start),
540-
MEMINIT_EARLY, NULL);
540+
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
541541
return 0;
542542
}
543543

@@ -547,7 +547,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
547547
{
548548
if (!vmem_map) {
549549
memmap_init_zone(size, nid, zone, start_pfn,
550-
MEMINIT_EARLY, NULL);
550+
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
551551
} else {
552552
struct page *start;
553553
struct memmap_init_callback_data args;

include/linux/memory_hotplug.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -351,7 +351,8 @@ extern int add_memory_resource(int nid, struct resource *resource);
351351
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
352352
const char *resource_name);
353353
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
354-
unsigned long nr_pages, struct vmem_altmap *altmap);
354+
unsigned long nr_pages,
355+
struct vmem_altmap *altmap, int migratetype);
355356
extern void remove_pfn_range_from_zone(struct zone *zone,
356357
unsigned long start_pfn,
357358
unsigned long nr_pages);

include/linux/mm.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2440,7 +2440,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
24402440

24412441
extern void set_dma_reserve(unsigned long new_dma_reserve);
24422442
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2443-
enum meminit_context, struct vmem_altmap *);
2443+
enum meminit_context, struct vmem_altmap *, int migratetype);
24442444
extern void setup_per_zone_wmarks(void);
24452445
extern int __meminit init_per_zone_wmark_min(void);
24462446
extern void mem_init(void);

mm/memory_hotplug.c

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -701,9 +701,14 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
701701
* Associate the pfn range with the given zone, initializing the memmaps
702702
* and resizing the pgdat/zone data to span the added pages. After this
703703
* call, all affected pages are PG_reserved.
704+
*
705+
* All aligned pageblocks are initialized to the specified migratetype
706+
* (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
707+
* zone stats (e.g., nr_isolate_pageblock) are touched.
704708
*/
705709
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
706-
unsigned long nr_pages, struct vmem_altmap *altmap)
710+
unsigned long nr_pages,
711+
struct vmem_altmap *altmap, int migratetype)
707712
{
708713
struct pglist_data *pgdat = zone->zone_pgdat;
709714
int nid = pgdat->node_id;
@@ -728,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
728733
* are reserved so nobody should be touching them so we should be safe
729734
*/
730735
memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
731-
MEMINIT_HOTPLUG, altmap);
736+
MEMINIT_HOTPLUG, altmap, migratetype);
732737

733738
set_zone_contiguous(zone);
734739
}
@@ -808,7 +813,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
808813

809814
/* associate pfn range with the zone */
810815
zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
811-
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
816+
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE);
812817

813818
arg.start_pfn = pfn;
814819
arg.nr_pages = nr_pages;

mm/memremap.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -266,7 +266,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
266266

267267
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
268268
move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
269-
PHYS_PFN(range_len(range)), params->altmap);
269+
PHYS_PFN(range_len(range)), params->altmap,
270+
MIGRATE_MOVABLE);
270271
}
271272

272273
mem_hotplug_done();

mm/page_alloc.c

Lines changed: 12 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -5990,10 +5990,15 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
59905990
* Initially all pages are reserved - free ones are freed
59915991
* up by memblock_free_all() once the early boot process is
59925992
* done. Non-atomic initialization, single-pass.
5993+
*
5994+
* All aligned pageblocks are initialized to the specified migratetype
5995+
* (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
5996+
* zone stats (e.g., nr_isolate_pageblock) are touched.
59935997
*/
59945998
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5995-
unsigned long start_pfn, enum meminit_context context,
5996-
struct vmem_altmap *altmap)
5999+
unsigned long start_pfn,
6000+
enum meminit_context context,
6001+
struct vmem_altmap *altmap, int migratetype)
59976002
{
59986003
unsigned long pfn, end_pfn = start_pfn + size;
59996004
struct page *page;
@@ -6037,14 +6042,12 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
60376042
__SetPageReserved(page);
60386043

60396044
/*
6040-
* Mark the block movable so that blocks are reserved for
6041-
* movable at startup. This will force kernel allocations
6042-
* to reserve their blocks rather than leaking throughout
6043-
* the address space during boot when many long-lived
6044-
* kernel allocations are made.
6045+
* Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6046+
* such that unmovable allocations won't be scattered all
6047+
* over the place during system boot.
60456048
*/
60466049
if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6047-
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6050+
set_pageblock_migratetype(page, migratetype);
60486051
cond_resched();
60496052
}
60506053
pfn++;
@@ -6144,7 +6147,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
61446147
if (end_pfn > start_pfn) {
61456148
size = end_pfn - start_pfn;
61466149
memmap_init_zone(size, nid, zone, start_pfn,
6147-
MEMINIT_EARLY, NULL);
6150+
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
61486151
}
61496152
}
61506153
}

0 commit comments

Comments (0)