author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-18 13:24:12 +1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-18 13:12:57 -0700
commit	dcce284a259373f9e5570f2e33f79eca84fcf565 (patch)
tree	afc4b23208974f17c080ea3d2ecfbaca4254c010 /mm/slab.c
parent	9729a6eb5878a3daa18395f2b5fb38bf9359a761 (diff)
mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot, so this
moves it out of slab/slub and uses it with the page allocator as well.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	15
1 file changed, 2 insertions, 13 deletions
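For context, this change drops slab's private slab_gfp_mask and instead masks allocation flags against a single gfp_allowed_mask that the page allocator can share, as the hunks below show. The standalone C sketch that follows illustrates the boot-time masking idea only; the flag values, the GFP_BOOT_MASK definition, and the mask_gfp() helper are invented for the example and are not the kernel's actual definitions.

/*
 * Minimal standalone sketch of boot-time gfp masking (illustration only,
 * not the kernel's code).  Flag bits and names below are made up for the
 * example; the real definitions live in include/linux/gfp.h.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT      0x10u                   /* hypothetical bit values */
#define __GFP_IO        0x40u
#define __GFP_FS        0x80u
#define __GFP_BITS_MASK 0xffu

/* During early boot, anything that could sleep or do I/O is disallowed. */
#define GFP_BOOT_MASK   (__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

static gfp_t gfp_allowed_mask = GFP_BOOT_MASK;

/* One choke point: every allocator strips disallowed flags here. */
static gfp_t mask_gfp(gfp_t flags)
{
	return flags & gfp_allowed_mask;
}

int main(void)
{
	gfp_t request = __GFP_WAIT | __GFP_IO; /* a "can sleep, can do I/O" request */

	printf("early boot: %#x -> %#x\n", request, mask_gfp(request));

	/* Once interrupts are enabled, the restriction is lifted. */
	gfp_allowed_mask = __GFP_BITS_MASK;
	printf("after init: %#x -> %#x\n", request, mask_gfp(request));
	return 0;
}

The point of centralizing the mask is that slab, slub, and the page allocator all strip unsafe flags in one place, and the restriction is lifted once, when interrupts are enabled, instead of each allocator tracking boot state on its own.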
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6..e74a16e4ced 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,12 +305,6 @@ struct kmem_list3 {
 };
 
 /*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);