 arch/ia64/mm/init.c    |  5 +++--
 arch/s390/mm/vmem.c    |  3 ++-
 include/linux/mm.h     |  3 ++-
 include/linux/mmzone.h |  8 ++++++--
 mm/memory_hotplug.c    |  6 ++++--
 mm/page_alloc.c        | 25 +++++++++++++++++--------
 6 files changed, 34 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1a3d8a2feb9..1373fae7657 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
 
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
-				 args->nid, args->zone, page_to_pfn(map_start));
+				 args->nid, args->zone, page_to_pfn(map_start),
+				 MEMMAP_EARLY);
 	return 0;
 }
 
@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
 	if (!vmem_map)
-		memmap_init_zone(size, nid, zone, start_pfn);
+		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
 	else {
 		struct page *start;
 		struct memmap_init_callback_data args;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 7f2944d3ec2..cd3d93e8c21 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
 
 		if (map_start < map_end)
 			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start));
+					 nid, zone, page_to_pfn(map_start),
+					 MEMMAP_EARLY);
 	}
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a17b147c61e..76912231af4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn);
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+				unsigned long, enum memmap_context);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
 extern void show_mem(void);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e339a7345f2..b262f47961f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -450,9 +450,13 @@ void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
-
+enum memmap_context {
+	MEMMAP_EARLY,
+	MEMMAP_HOTPLUG,
+};
 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
-				     unsigned long size);
+				     unsigned long size,
+				     enum memmap_context context);
 
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
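
For illustration only, not part of the patch: a minimal, self-contained userspace sketch of how callers are expected to pick a memmap_context value after this change. Boot-time zone setup passes MEMMAP_EARLY, while the memory-hotplug path passes MEMMAP_HOTPLUG. init_zone_stub() below is an invented stand-in for this sketch, not the kernel's init_currently_empty_zone().

	#include <stdio.h>

	enum memmap_context {
		MEMMAP_EARLY,
		MEMMAP_HOTPLUG,
	};

	/* Simplified stand-in for init_currently_empty_zone(): just reports
	 * which initialization path the caller claims to be on. */
	static int init_zone_stub(unsigned long start_pfn, unsigned long nr_pages,
				  enum memmap_context context)
	{
		printf("zone init pfn %lu..%lu: %s\n", start_pfn, start_pfn + nr_pages,
		       context == MEMMAP_EARLY ? "early boot" : "memory hotplug");
		return 0;
	}

	int main(void)
	{
		/* boot path (cf. free_area_init_core() in mm/page_alloc.c) */
		init_zone_stub(0, 1024, MEMMAP_EARLY);
		/* hotplug path (cf. __add_zone() in mm/memory_hotplug.c) */
		init_zone_stub(65536, 256, MEMMAP_HOTPLUG);
		return 0;
	}
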
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0c055a090f4..84279127fcd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 
 	zone_type = zone - pgdat->node_zones;
 	if (!populated_zone(zone)) {
 		int ret = 0;
-		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+		ret = init_currently_empty_zone(zone, phys_start_pfn,
+						nr_pages, MEMMAP_HOTPLUG);
 		if (ret < 0)
 			return ret;
 	}
-	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
+	memmap_init_zone(nr_pages, nid, zone_type,
+			 phys_start_pfn, MEMMAP_HOTPLUG);
 	return 0;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49f96b7ea4..fc5b5442e94 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn)
+		unsigned long start_pfn, enum memmap_context context)
 {
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
-			continue;
-		if (!early_pfn_in_nid(pfn, nid))
-			continue;
+		/*
+		 * There can be holes in boot-time mem_map[]s
+		 * handed to this function. They do not
+		 * exist on hotplugged memory.
+		 */
+		if (context == MEMMAP_EARLY) {
+			if (!early_pfn_valid(pfn))
+				continue;
+			if (!early_pfn_in_nid(pfn, nid))
+				continue;
+		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn))
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
-					unsigned long size)
+					unsigned long size,
+					enum memmap_context context)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn,
+						size, MEMMAP_EARLY);
 		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
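
For illustration only, not part of the patch: a self-contained userspace sketch of the behavioural change in memmap_init_zone(). The pfn-validity checks are applied only under MEMMAP_EARLY, because holes can exist in boot-time mem_map[]s but not in hotplugged ranges. early_pfn_valid_stub() is an invented stand-in for the kernel's early_pfn_valid()/early_pfn_in_nid() checks.

	#include <stdbool.h>
	#include <stdio.h>

	enum memmap_context {
		MEMMAP_EARLY,
		MEMMAP_HOTPLUG,
	};

	/* Invented stand-in for early_pfn_valid()/early_pfn_in_nid():
	 * pretend the boot-time mem_map has a hole at every odd pfn. */
	static bool early_pfn_valid_stub(unsigned long pfn)
	{
		return (pfn & 1) == 0;
	}

	static void memmap_init_zone_sketch(unsigned long size, unsigned long start_pfn,
					    enum memmap_context context)
	{
		unsigned long pfn, end_pfn = start_pfn + size;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			/* Holes can only exist in boot-time mem_map[]s,
			 * so the check is skipped for hotplugged memory. */
			if (context == MEMMAP_EARLY && !early_pfn_valid_stub(pfn))
				continue;
			printf("initialising struct page for pfn %lu\n", pfn);
		}
	}

	int main(void)
	{
		memmap_init_zone_sketch(4, 0, MEMMAP_EARLY);     /* skips odd pfns */
		memmap_init_zone_sketch(4, 100, MEMMAP_HOTPLUG); /* initialises every pfn */
		return 0;
	}
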