path: root/mm/sparse.c
author     Yasunori Goto <y-goto@jp.fujitsu.com>           2008-04-28 02:13:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 08:58:26 -0700
commit     0c0a4a517a31e05efb38304668198a873bfec6ca (patch)
tree       3d02fe9dbf160cd5d328c1e2cf4b40ce37426c5f /mm/sparse.c
parent     86f6dae1377523689bd8468fed2f2dd180fc0560 (diff)
memory hotplug: free memmaps allocated by bootmem
This patch frees memmaps that were allocated by bootmem.

Freeing the usemap is not necessary, because its pages may still be needed by other sections. If the section being removed is the last section on its node, that section is the final user of the usemap page (usemaps are allocated on its section by the previous patch). Even then the usemap should not be freed, because the section must be in the logically offline state, with all of its pages isolated from the page allocator. If the usemap page were freed, the page allocator might hand it out again even though it is about to be removed physically, which would be a disaster. So this patch keeps it as it is.

Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
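For background, this relies on the bootmem page bookkeeping introduced earlier in the series: each page backing a bootmem-allocated memmap (or node data) is tagged with a type value ("magic", e.g. SECTION_INFO or NODE_INFO from include/linux/memory_hotplug.h of this era) in page->_mapcount and with the owning section number in page->private, and put_page_bootmem() releases such a page back to the page allocator once its last reference is dropped. The following is a simplified sketch of those helpers from mm/memory_hotplug.c, for illustration only; the exact bodies in the tree differ slightly.

/* Simplified sketch (illustrative, not verbatim from the tree). */
static void get_page_bootmem(unsigned long info, struct page *page, int magic)
{
	atomic_set(&page->_mapcount, magic);	/* type: SECTION_INFO, NODE_INFO, ... */
	SetPagePrivate(page);
	set_page_private(page, info);		/* e.g. the owning section number */
	atomic_inc(&page->_count);		/* pinned until hot-remove */
}

void put_page_bootmem(struct page *page)
{
	/* On the last reference, hand the page back to the page allocator. */
	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		__free_pages_bootmem(page, 0);
	}
}

This is why free_map_bootmem() in the diff below can decide per page whether a memmap page may be returned to the allocator; its BUG_ON(magic == NODE_INFO) asserts that pages backing a section memmap are never tagged as node (pgdat) metadata.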
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--  mm/sparse.c  51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 08f053218ee..dff71f173ae 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -376,6 +377,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
@@ -413,17 +417,47 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+	unsigned long maps_section_nr, removing_section_nr, i;
+	int magic;
+
+	for (i = 0; i < nr_pages; i++, page++) {
+		magic = atomic_read(&page->_mapcount);
+
+		BUG_ON(magic == NODE_INFO);
+
+		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+		removing_section_nr = page->private;
+
+		/*
+		 * When this function is called, the section being removed is
+		 * logically offline, which means all of its pages have been
+		 * isolated from the page allocator.  If the memmap of the
+		 * section being removed is placed on that same section, it
+		 * must not be freed here: the page allocator could hand it
+		 * out again even though it is about to be removed physically.
+		 */
+		if (maps_section_nr != removing_section_nr)
+			put_page_bootmem(page);
+	}
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
+	struct page *usemap_page;
+	unsigned long nr_pages;
+
	if (!usemap)
		return;
+	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
-	if (PageSlab(virt_to_page(usemap))) {
+	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
@@ -431,10 +465,19 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
	}
	/*
-	 * TODO: Allocations came from bootmem - how do I free up ?
+	 * The usemap came from bootmem. This is packed with other usemaps
+	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */
-	printk(KERN_WARNING "Not freeing up allocations from bootmem "
-			"- leaking memory\n");
+
+	if (memmap) {
+		struct page *memmap_page;
+		memmap_page = virt_to_page(memmap);
+
+		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+			>> PAGE_SHIFT;
+
+		free_map_bootmem(memmap_page, nr_pages);
+	}
}
/*
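For context (not part of this diff), free_section_usemap() is reached from the memory hot-remove path added by the companion __remove_pages() patches in the same series: the per-section teardown detaches the section's mem_map and usemap and then frees them. The sketch below approximates that caller as it looked in mm/sparse.c of this era, for illustration only; details may differ.

/* Rough sketch of the caller (approximate, illustrative only). */
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}

As a rough sizing example (assuming x86_64 defaults of this era: 128 MiB sections, 4 KiB pages, a struct page of roughly 56-64 bytes), PAGES_PER_SECTION is 32768, so the per-section memmap is on the order of 2 MiB and free_map_bootmem() walks about 500 pages, calling put_page_bootmem() on each page whose owning section differs from the section being removed.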