Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 88 ++++++++++++++++++++++++++++--------------------
 1 file changed, 48 insertions(+), 40 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1ce41c..ec21304856d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,20 +431,7 @@ static struct stack_trace lockdep_init_trace = {
/*
* Various lockdep statistics:
*/
-atomic_t chain_lookup_hits;
-atomic_t chain_lookup_misses;
-atomic_t hardirqs_on_events;
-atomic_t hardirqs_off_events;
-atomic_t redundant_hardirqs_on;
-atomic_t redundant_hardirqs_off;
-atomic_t softirqs_on_events;
-atomic_t softirqs_off_events;
-atomic_t redundant_softirqs_on;
-atomic_t redundant_softirqs_off;
-atomic_t nr_unused_locks;
-atomic_t nr_cyclic_checks;
-atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_backwards_checks;
+DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
/*
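
The fourteen global atomic_t counters above collapse into a single per-cpu structure. A minimal sketch of what that scheme implies, assuming field names that mirror the old counters (the real definitions live in kernel/lockdep_internals.h, and the exact read-side helper may differ):

	struct lockdep_stats {
		int	chain_lookup_hits;
		int	chain_lookup_misses;
		int	hardirqs_on_events;
		int	hardirqs_off_events;
		int	redundant_hardirqs_on;
		int	redundant_hardirqs_off;
		/* ... one plain int per former atomic_t ... */
		int	nr_unused_locks;
	};

	DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

	/*
	 * Reads fold all CPUs together; nothing here pays the bus-locked
	 * cycles that the old global atomic_t counters cost on every event.
	 */
	#define debug_atomic_read(field)	({			\
		unsigned long long __total = 0;				\
		int __cpu;						\
		for_each_possible_cpu(__cpu)				\
			__total += per_cpu(lockdep_stats, __cpu).field;	\
		__total;						\
	})

Call sites change accordingly: debug_atomic_inc() now takes the field name rather than a pointer, which is why every debug_atomic_inc(&x) below becomes debug_atomic_inc(x).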
@@ -748,7 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
return NULL;
}
class = lock_classes + nr_lock_classes++;
- debug_atomic_inc(&nr_unused_locks);
+ debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
@@ -818,7 +805,8 @@ static struct lock_list *alloc_list_entry(void)
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip, int distance)
+ struct list_head *head, unsigned long ip,
+ int distance, struct stack_trace *trace)
{
struct lock_list *entry;
/*
@@ -829,11 +817,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
if (!entry)
return 0;
- if (!save_trace(&entry->trace))
- return 0;
-
entry->class = this;
entry->distance = distance;
+ entry->trace = *trace;
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
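
The comment is truncated by the hunk boundary, but the invariant it states (entries are only ever added, never removed) is what allows entry->trace to be filled in by value before the entry is published. The publication, a few lines further down in add_lock_to_list(), uses the RCU list primitive so that lockless readers never observe a half-initialised entry:

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * list_add_tail_rcu() contains the required memory barrier:
	 * every store above is ordered before the pointer update that
	 * makes the entry reachable from head.
	 */
	list_add_tail_rcu(&entry->entry, head);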
@@ -1205,7 +1191,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
{
int result;
- debug_atomic_inc(&nr_cyclic_checks);
+ debug_atomic_inc(nr_cyclic_checks);
result = __bfs_forwards(root, target, class_equal, target_entry);
@@ -1242,7 +1228,7 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
{
int result;
- debug_atomic_inc(&nr_find_usage_forwards_checks);
+ debug_atomic_inc(nr_find_usage_forwards_checks);
result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
@@ -1265,7 +1251,7 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
{
int result;
- debug_atomic_inc(&nr_find_usage_backwards_checks);
+ debug_atomic_inc(nr_find_usage_backwards_checks);
result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
@@ -1635,12 +1621,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, int distance)
+ struct held_lock *next, int distance, int trylock_loop)
{
struct lock_list *entry;
int ret;
struct lock_list this;
struct lock_list *uninitialized_var(target_entry);
+ /*
+ * Static variable, serialized by the graph_lock().
+ *
+ * We use this static variable to save the stack trace in case
+ * we call into this function multiple times due to encountering
+ * trylocks in the held lock stack.
+ */
+ static struct stack_trace trace;
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1688,20 +1682,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
}
}
+ if (!trylock_loop && !save_trace(&trace))
+ return 0;
+
/*
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(prev)->locks_after,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, &trace);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(next)->locks_before,
- next->acquire_ip, distance);
+ next->acquire_ip, distance, &trace);
if (!ret)
return 0;
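
One save_trace() now serves both directions because struct lock_list embeds the trace by value; abbreviated from include/linux/lockdep.h of this era (BFS bookkeeping fields elided):

	struct lock_list {
		struct list_head	entry;
		struct lock_class	*class;
		struct stack_trace	trace;	/* copied from the shared saved trace */
		int			distance;
		struct lock_list	*parent;	/* used by the BFS walkers */
	};

Capturing the trace once per check_prevs_add() loop, rather than once per list entry, is the point of the trylock_loop flag: repeated calls triggered by trylocks on the held-lock stack all reuse the trace saved on the first pass.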
@@ -1731,6 +1728,7 @@ static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
+ int trylock_loop = 0;
struct held_lock *hlock;
/*
@@ -1756,7 +1754,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
* added:
*/
if (hlock->read != 2) {
- if (!check_prev_add(curr, hlock, next, distance))
+ if (!check_prev_add(curr, hlock, next,
+ distance, trylock_loop))
return 0;
/*
* Stop after the first non-trylock entry,
@@ -1779,6 +1778,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
break;
+ trylock_loop = 1;
}
return 1;
out_bug:
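
In outline, the caller's walk over the held-lock stack looks like this (a simplified sketch; the per-iteration distance computation and the out_bug path are elided):

	for (;;) {
		hlock = curr->held_locks + depth - 1;
		/* hlock->read == 2 means recursive read: no new dependency */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next,
					    distance, trylock_loop))
				return 0;
			if (!hlock->trylock)
				break;		/* first non-trylock entry: done */
		}
		if (--depth == 0)
			break;
		/* irq-context boundary check elided */
		trylock_loop = 1;	/* later passes reuse the saved trace */
	}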
@@ -1825,7 +1825,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
- debug_atomic_inc(&chain_lookup_hits);
+ debug_atomic_inc(chain_lookup_hits);
if (very_verbose(class))
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%p] %s\n",
@@ -1890,7 +1890,7 @@ cache_hit:
chain_hlocks[chain->base + j] = class - lock_classes;
}
list_add_tail_rcu(&chain->entry, hash_head);
- debug_atomic_inc(&chain_lookup_misses);
+ debug_atomic_inc(chain_lookup_misses);
inc_chains();
return 1;
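
For context, the counters touched here bracket a classic hash-bucket walk: lookup_chain_cache() hashes the current sequence of held lock classes into chain_key and keeps one lock_chain per distinct key. Schematically (simplified from the surrounding code):

	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			debug_atomic_inc(chain_lookup_hits);
			return 1;	/* chain already validated once */
		}
	}
	/* ... take graph_lock(), allocate and fill a new chain ... */
	list_add_tail_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);

The RCU publish matches the lockless list_for_each_entry() walk above it, the same pattern used for the dependency lists.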
@@ -2311,7 +2311,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
return;
if (unlikely(curr->hardirqs_enabled)) {
- debug_atomic_inc(&redundant_hardirqs_on);
+ /*
+ * Neither irqs nor preemption are disabled here
+ * so this is racy by nature but losing one hit
+ * in a stat is not a big deal.
+ */
+ __debug_atomic_inc(redundant_hardirqs_on);
return;
}
/* we'll do an OFF -> ON transition: */
@@ -2338,7 +2343,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
curr->hardirq_enable_ip = ip;
curr->hardirq_enable_event = ++curr->irq_events;
- debug_atomic_inc(&hardirqs_on_events);
+ debug_atomic_inc(hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
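
Note the two flavors this patch introduces: __debug_atomic_inc() is the bare per-cpu increment used above, where neither irqs nor preemption are disabled and a lost update is tolerable, while plain debug_atomic_inc() is used everywhere the caller already runs with irqs off. A plausible pair of definitions under that assumption (the exact sanity check in the real header may differ):

	/* Racy flavor: callers may be preempted or interrupted mid-update. */
	#define __debug_atomic_inc(field)		\
		this_cpu_inc(lockdep_stats.field)

	/*
	 * Checked flavor: with irqs disabled, the non-atomic per-cpu
	 * read-modify-write cannot be interrupted, so no update is lost.
	 */
	#define debug_atomic_inc(field)	do {		\
		WARN_ON_ONCE(!irqs_disabled());		\
		__this_cpu_inc(lockdep_stats.field);	\
	} while (0)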
@@ -2370,9 +2375,9 @@ void trace_hardirqs_off_caller(unsigned long ip)
curr->hardirqs_enabled = 0;
curr->hardirq_disable_ip = ip;
curr->hardirq_disable_event = ++curr->irq_events;
- debug_atomic_inc(&hardirqs_off_events);
+ debug_atomic_inc(hardirqs_off_events);
} else
- debug_atomic_inc(&redundant_hardirqs_off);
+ debug_atomic_inc(redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -2396,7 +2401,7 @@ void trace_softirqs_on(unsigned long ip)
return;
if (curr->softirqs_enabled) {
- debug_atomic_inc(&redundant_softirqs_on);
+ debug_atomic_inc(redundant_softirqs_on);
return;
}
@@ -2406,7 +2411,7 @@ void trace_softirqs_on(unsigned long ip)
curr->softirqs_enabled = 1;
curr->softirq_enable_ip = ip;
curr->softirq_enable_event = ++curr->irq_events;
- debug_atomic_inc(&softirqs_on_events);
+ debug_atomic_inc(softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
@@ -2436,10 +2441,10 @@ void trace_softirqs_off(unsigned long ip)
curr->softirqs_enabled = 0;
curr->softirq_disable_ip = ip;
curr->softirq_disable_event = ++curr->irq_events;
- debug_atomic_inc(&softirqs_off_events);
+ debug_atomic_inc(softirqs_off_events);
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
- debug_atomic_inc(&redundant_softirqs_off);
+ debug_atomic_inc(redundant_softirqs_off);
}
static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
@@ -2644,7 +2649,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
break;
case LOCK_USED:
- debug_atomic_dec(&nr_unused_locks);
+ debug_atomic_dec(nr_unused_locks);
break;
default:
if (!debug_locks_off_graph_unlock())
@@ -2750,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (!class)
return 0;
}
- debug_atomic_inc((atomic_t *)&class->ops);
+ atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
printk("\nacquire class [%p] %s", class->key, class->name);
if (class->name_version > 1)
@@ -3227,7 +3232,7 @@ void lock_release(struct lockdep_map *lock, int nested,
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
- trace_lock_release(lock, nested, ip);
+ trace_lock_release(lock, ip);
__lock_release(lock, nested, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
@@ -3380,7 +3385,7 @@ found_it:
hlock->holdtime_stamp = now;
}
- trace_lock_acquired(lock, ip, waittime);
+ trace_lock_acquired(lock, ip);
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
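
Both tracepoints shrink by one argument here: the nested flag to lock_release was apparently unused by consumers, and the wait time reported by lock_acquired can be derived from the lock_contended/lock_acquired event timestamps rather than being passed in. The resulting probe prototypes, sketched from the call sites (the TRACE_EVENT definitions live in include/trace/events/lock.h):

	void trace_lock_release(struct lockdep_map *lock, unsigned long ip);
	void trace_lock_acquired(struct lockdep_map *lock, unsigned long ip);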
@@ -3801,8 +3806,11 @@ void lockdep_rcu_dereference(const char *file, const int line)
{
struct task_struct *curr = current;
+#ifndef CONFIG_PROVE_RCU_REPEATEDLY
if (!debug_locks_off())
return;
+#endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
+ /* Note: the following can be executed concurrently, so be careful. */
printk("\n===================================================\n");
printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
printk( "---------------------------------------------------\n");