author     Ingo Molnar <mingo@elte.hu>  2008-08-11 10:30:30 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-08-11 10:30:30 +0200
commit     3295f0ef9ff048a4619ede597ad9ec9cab725654 (patch)
tree       f39a8ecf1958130a0b86c554399d23a65b1c3991  /kernel/workqueue.c
parent     8bfe0298f7a04952d19f4a2cf510d7a6311eeed0 (diff)
lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]()
the names were too generic:

 drivers/uio/uio.c:87: error: expected identifier or '(' before 'do'
 drivers/uio/uio.c:87: error: expected identifier or '(' before 'while'
 drivers/uio/uio.c:113: error: 'map_release' undeclared here (not in a function)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
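The renamed helpers are plain lockdep annotations: a subsystem registers a struct lockdep_map for a "pseudo-lock" (here, a workqueue or a work item) and brackets the regions lockdep should track with lock_map_acquire()/lock_map_release(). Below is a minimal sketch of that pattern, not part of this patch; the my_subsys_* names are hypothetical, while lockdep_init_map(), lock_map_acquire() and lock_map_release() are the real kernel APIs touched here.

/*
 * Minimal sketch (illustration only, not from this patch) of annotating a
 * pseudo-lock with lockdep. All my_subsys_* identifiers are made up.
 */
#include <linux/lockdep.h>

static struct lock_class_key my_subsys_key;	/* hypothetical */
static struct lockdep_map my_subsys_map;	/* hypothetical */

static void my_subsys_init(void)
{
	/* Register the map with lockdep under a human-readable class name. */
	lockdep_init_map(&my_subsys_map, "my_subsys", &my_subsys_key, 0);
}

static void my_subsys_run_callback(void (*fn)(void))
{
	/*
	 * Pretend to hold the pseudo-lock while the callback runs, so a
	 * callback that waits on the same subsystem shows up as a
	 * potential deadlock in the lockdep report.
	 */
	lock_map_acquire(&my_subsys_map);
	fn();
	lock_map_release(&my_subsys_map);
}

static void my_subsys_flush(void)
{
	/*
	 * A momentary acquire/release, as in the flush_workqueue() hunk
	 * below, records "the flusher may wait for the callback" without
	 * actually holding anything across the wait.
	 */
	lock_map_acquire(&my_subsys_map);
	lock_map_release(&my_subsys_map);
}

Using the same map in both places is what lets lockdep connect the flusher to the running callback and warn, for example, when a work function flushes its own workqueue.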
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  |  24
1 files changed, 12 insertions, 12 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 53564ae894a..8bb5b68fb3a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);
- map_acquire(&cwq->wq->lockdep_map);
- map_acquire(&lockdep_map);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
f(work);
- map_release(&lockdep_map);
- map_release(&cwq->wq->lockdep_map);
+ lock_map_release(&lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
int cpu;
might_sleep();
- map_acquire(&wq->lockdep_map);
- map_release(&wq->lockdep_map);
+ lock_map_acquire(&wq->lockdep_map);
+ lock_map_release(&wq->lockdep_map);
for_each_cpu_mask_nr(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
if (!cwq)
return 0;
- map_acquire(&cwq->wq->lockdep_map);
- map_release(&cwq->wq->lockdep_map);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
prev = NULL;
spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
might_sleep();
- map_acquire(&work->lockdep_map);
- map_release(&work->lockdep_map);
+ lock_map_acquire(&work->lockdep_map);
+ lock_map_release(&work->lockdep_map);
cwq = get_wq_data(work);
if (!cwq)
@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
if (cwq->thread == NULL)
return;
- map_acquire(&cwq->wq->lockdep_map);
- map_release(&cwq->wq->lockdep_map);
+ lock_map_acquire(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
flush_cpu_workqueue(cwq);
/*