mm-slab-wrap-functions.patch

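Replace the bare local_irq_*() / spin_*_irq() protection of the SLAB
per-CPU array caches and node list locks with the local lock
"slab_lock".  On mainline the wrappers are expected to collapse to the
plain IRQ/spinlock operations, so behaviour should be unchanged; on
PREEMPT_RT the lock can be substituted by a per-CPU sleeping spinlock,
which keeps the allocator paths preemptible.

On PREEMPT_RT the cache drain (do_drain) and per-CPU cache update
(do_ccupdate_local) no longer have to run on the remote CPUs via
on_each_cpu(): slab_on_each_cpu() walks the online CPUs from the
calling CPU under get_cpu_light(), because the per-CPU data is
serialized by slab_lock rather than by execution context.  The workers
are split into __do_*() helpers which take an explicit CPU and use
cpu_cache_get_on_cpu().  check_irq_off() is relaxed to BUG_ON_NONRT()
as interrupts stay enabled under the local lock on RT.

For reference, a rough sketch of the !CONFIG_PREEMPT_RT_BASE fallbacks
that <linux/locallock.h> is assumed to provide (illustrative only, not
the actual header contents):

	/*
	 * Sketch (assumption): without PREEMPT_RT_BASE the local-lock
	 * wrappers degrade to the plain IRQ/spinlock operations.
	 */
	#define local_lock_irq(lvar)			local_irq_disable()
	#define local_unlock_irq(lvar)			local_irq_enable()
	#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
	#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
	#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
	#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
	#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
	#define local_spin_lock_irqsave(lvar, lock, flags)		\
		spin_lock_irqsave(lock, flags)
	#define local_spin_unlock_irqrestore(lvar, lock, flags)		\
		spin_unlock_irqrestore(lock, flags)
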
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/mm/slab.c b/mm/slab.c
index 22b1682..698bf71 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
 #include	<linux/kmemcheck.h>
 #include	<linux/memory.h>
 #include	<linux/prefetch.h>
+#include	<linux/locallock.h>
 
 #include	<net/sock.h>
 
@@ -633,12 +634,49 @@
 #endif
 
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define slab_on_each_cpu(func, cp)	on_each_cpu(func, cp, 1)
+#else
+/*
+ * Execute func() for all CPUs. On PREEMPT_RT we don't actually have
+ * to run on the remote CPUs - we only have to take their CPU-locks.
+ * (This is a rare operation, so cacheline bouncing is not an issue.)
+ */
+static void
+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+{
+	unsigned int i;
+
+	get_cpu_light();
+	for_each_online_cpu(i)
+		func(arg, i);
+	put_cpu_light();
+}
+
+static void lock_slab_on(unsigned int cpu)
+{
+	local_lock_irq_on(slab_lock, cpu);
+}
+
+static void unlock_slab_on(unsigned int cpu)
+{
+	local_unlock_irq_on(slab_lock, cpu);
+}
+#endif
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
 }
 
+static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
+						       int cpu)
+{
+	return cachep->array[cpu];
+}
+
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@ -1073,9 +1111,10 @@
 	if (n->alien) {
 		struct array_cache *ac = n->alien[node];
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+		if (ac && ac->avail &&
+		    local_spin_trylock_irq(slab_lock, &ac->lock)) {
 			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
+			local_spin_unlock_irq(slab_lock, &ac->lock);
 		}
 	}
 }
@@ -1090,9 +1129,9 @@
 	for_each_online_node(i) {
 		ac = alien[i];
 		if (ac) {
-			spin_lock_irqsave(&ac->lock, flags);
+			local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
-			spin_unlock_irqrestore(&ac->lock, flags);
+			local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
 		}
 	}
 }
@@ -1171,11 +1210,11 @@
 			cachep->node[node] = n;
 		}
 
-		spin_lock_irq(&cachep->node[node]->list_lock);
+		local_spin_lock_irq(slab_lock, &cachep->node[node]->list_lock);
 		cachep->node[node]->free_limit =
 			(1 + nr_cpus_node(node)) *
 			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&cachep->node[node]->list_lock);
+		local_spin_unlock_irq(slab_lock, &cachep->node[node]->list_lock);
 	}
 	return 0;
 }
@@ -1200,7 +1239,7 @@
 		if (!n)
 			goto free_array_cache;
 
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -1208,7 +1247,7 @@
 			free_block(cachep, nc->entry, nc->avail, node);
 
 		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&n->list_lock);
+			local_spin_unlock_irq(slab_lock, &n->list_lock);
 			goto free_array_cache;
 		}
 
@@ -1222,7 +1261,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1296,7 +1335,7 @@
 		n = cachep->node[node];
 		BUG_ON(!n);
 
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 		if (!n->shared) {
 			/*
 			 * We are serialised from CPU_DEAD or
@@ -1311,7 +1350,7 @@
 			alien = NULL;
 		}
 #endif
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1512,6 +1551,8 @@
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
+	local_irq_lock_init(slab_lock);
+
 	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_cache_node_init(&init_kmem_cache_node[i]);
 
@@ -2408,7 +2449,7 @@
 #if DEBUG
 static void check_irq_off(void)
 {
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NONRT(!irqs_disabled());
 }
 
 static void check_irq_on(void)
@@ -2443,26 +2484,37 @@
 			struct array_cache *ac,
 			int force, int node);
 
-static void do_drain(void *arg)
+static void __do_drain(void *arg, unsigned int cpu)
 {
 	struct kmem_cache *cachep = arg;
 	struct array_cache *ac;
-	int node = numa_mem_id();
+	int node = cpu_to_mem(cpu);
 
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
+	ac = cpu_cache_get_on_cpu(cachep, cpu);
 	spin_lock(&cachep->node[node]->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
 	spin_unlock(&cachep->node[node]->list_lock);
 	ac->avail = 0;
 }
 
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_drain(void *arg)
+{
+	__do_drain(arg, smp_processor_id());
+}
+#else
+static void do_drain(void *arg, int this_cpu)
+{
+	__do_drain(arg, this_cpu);
+}
+#endif
+
 static void drain_cpu_caches(struct kmem_cache *cachep)
 {
 	struct kmem_cache_node *n;
 	int node;
 
-	on_each_cpu(do_drain, cachep, 1);
+	slab_on_each_cpu(do_drain, cachep);
 	check_irq_on();
 	for_each_online_node(node) {
 		n = cachep->node[node];
@@ -2493,10 +2545,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			spin_unlock_irq(&n->list_lock);
+			local_spin_unlock_irq(slab_lock, &n->list_lock);
 			goto out;
 		}
 
@@ -2510,7 +2562,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 		slab_destroy(cache, slabp);
 		nr_freed++;
 	}
@@ -2785,7 +2837,7 @@
 	offset *= cachep->colour_off;
 
 	if (local_flags & __GFP_WAIT)
-		local_irq_enable();
+		local_unlock_irq(slab_lock);
 
 	/*
 	 * The test for missing atomic flag is performed here, rather than
@@ -2815,7 +2867,7 @@
 	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
+		local_lock_irq(slab_lock);
 	check_irq_off();
 	spin_lock(&n->list_lock);
 
@@ -2829,7 +2881,7 @@
 	kmem_freepages(cachep, objp);
 failed:
 	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
+		local_lock_irq(slab_lock);
 	return 0;
 }
 
@@ -3243,11 +3295,11 @@
 		 * set and go into memory reserves if necessary.
 		 */
 		if (local_flags & __GFP_WAIT)
-			local_irq_enable();
+			local_unlock_irq(slab_lock);
 		kmem_flagcheck(cache, flags);
 		obj = kmem_getpages(cache, local_flags, numa_mem_id());
 		if (local_flags & __GFP_WAIT)
-			local_irq_disable();
+			local_lock_irq(slab_lock);
 		if (obj) {
 			/*
 			 * Insert into the appropriate per node queues
@@ -3368,7 +3420,7 @@
 	cachep = memcg_kmem_get_cache(cachep, flags);
 
 	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
+	local_lock_irqsave(slab_lock, save_flags);
 
 	if (nodeid == NUMA_NO_NODE)
 		nodeid = slab_node;
@@ -3393,7 +3445,7 @@
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
   out:
-	local_irq_restore(save_flags);
+	local_unlock_irqrestore(slab_lock, save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
 				 flags);
@@ -3455,9 +3507,9 @@
 	cachep = memcg_kmem_get_cache(cachep, flags);
 
 	cache_alloc_debugcheck_before(cachep, flags);
-	local_irq_save(save_flags);
+	local_lock_irqsave(slab_lock, save_flags);
 	objp = __do_cache_alloc(cachep, flags);
-	local_irq_restore(save_flags);
+	local_unlock_irqrestore(slab_lock, save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
 				 flags);
@@ -3774,9 +3826,9 @@
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	local_irq_save(flags);
+	local_lock_irqsave(slab_lock, flags);
 	__cache_free(cachep, objp, _RET_IP_);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(slab_lock, flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
 }
@@ -3805,9 +3857,9 @@
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
-	local_irq_save(flags);
+	local_lock_irqsave(slab_lock, flags);
 	__cache_free(c, (void *)objp, _RET_IP_);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(slab_lock, flags);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3844,7 +3896,7 @@
 		if (n) {
 			struct array_cache *shared = n->shared;
 
-			spin_lock_irq(&n->list_lock);
+			local_spin_lock_irq(slab_lock, &n->list_lock);
 
 			if (shared)
 				free_block(cachep, shared->entry,
@@ -3857,7 +3909,7 @@
 			}
 			n->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
-			spin_unlock_irq(&n->list_lock);
+			local_spin_unlock_irq(slab_lock, &n->list_lock);
 			kfree(shared);
 			free_alien_cache(new_alien);
 			continue;
@@ -3904,18 +3956,29 @@
 	struct array_cache *new[0];
 };
 
-static void do_ccupdate_local(void *info)
+static void __do_ccupdate_local(void *info, int cpu)
 {
 	struct ccupdate_struct *new = info;
 	struct array_cache *old;
 
-	check_irq_off();
-	old = cpu_cache_get(new->cachep);
+	old = cpu_cache_get_on_cpu(new->cachep, cpu);
 
-	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
-	new->new[smp_processor_id()] = old;
+	new->cachep->array[cpu] = new->new[cpu];
+	new->new[cpu] = old;
 }
 
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_ccupdate_local(void *info)
+{
+	__do_ccupdate_local(info, smp_processor_id());
+}
+#else
+static void do_ccupdate_local(void *info, int cpu)
+{
+	__do_ccupdate_local(info, cpu);
+}
+#endif
+
 /* Always called with the slab_mutex held */
 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
@@ -3940,7 +4003,7 @@
 	}
 	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)new, 1);
+	slab_on_each_cpu(do_ccupdate_local, (void *)new);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
@@ -3951,9 +4014,11 @@
 		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
-		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
+		local_spin_lock_irq(slab_lock,
+				    &cachep->node[cpu_to_mem(i)]->list_lock);
 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
-		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
+		local_spin_unlock_irq(slab_lock,
+				      &cachep->node[cpu_to_mem(i)]->list_lock);
 		kfree(ccold);
 	}
 	kfree(new);
@@ -4068,7 +4133,7 @@
 	if (ac->touched && !force) {
 		ac->touched = 0;
 	} else {
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 		if (ac->avail) {
 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
 			if (tofree > ac->avail)
@@ -4078,7 +4143,7 @@
 			memmove(ac->entry, &(ac->entry[tofree]),
 				sizeof(void *) * ac->avail);
 		}
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 	}
 }
 
@@ -4171,7 +4236,7 @@
 			continue;
 
 		check_irq_on();
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 
 		list_for_each_entry(slabp, &n->slabs_full, list) {
 			if (slabp->inuse != cachep->num && !error)
@@ -4196,7 +4261,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 	}
 	num_slabs += active_slabs;
 	num_objs = num_slabs * cachep->num;
@@ -4396,13 +4461,13 @@
 			continue;
 
 		check_irq_on();
-		spin_lock_irq(&n->list_lock);
+		local_spin_lock_irq(slab_lock, &n->list_lock);
 
 		list_for_each_entry(slabp, &n->slabs_full, list)
 			handle_slab(x, cachep, slabp);
 		list_for_each_entry(slabp, &n->slabs_partial, list)
 			handle_slab(x, cachep, slabp);
-		spin_unlock_irq(&n->list_lock);
+		local_spin_unlock_irq(slab_lock, &n->list_lock);
 	}
 	name = cachep->name;
 	if (x[0] == x[1]) {