author		Kent Overstreet <kmo@daterainc.com>	2014-01-19 08:26:37 +0000
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2014-01-23 20:17:18 +0000
commit		6f6b5d1ec56acdeab0503d2b823f6f88a0af493e (patch)
tree		7a95dca01cc9ddd652ccbfba4df9d5c30abda549 /lib
parent		4a4caa29f1abcb14377e05d57c0793d338fb945d (diff)
percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
This patch changes percpu_ida_alloc() + callers to accept a task state
bitmask for prepare_to_wait(), for code like target/iscsi that needs it
for interruptible sleep; that usage is provided in a subsequent patch.

It now expects TASK_UNINTERRUPTIBLE when the caller is able to sleep
waiting for a new tag, or TASK_RUNNING when the caller cannot sleep and
is forced to return a negative value when no tags are available.

v2 changes:
  - Include blk-mq + tcm_fc + vhost/scsi + target/iscsi changes
  - Drop signal_pending_state() call
v3 changes:
  - Only call prepare_to_wait() + finish_wait() when != TASK_RUNNING
    (PeterZ)

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: <stable@vger.kernel.org> #3.12+
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
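As a rough illustration of the new calling convention (the helper names below are hypothetical, not part of this patch): a caller that can sleep passes TASK_UNINTERRUPTIBLE and will eventually get a tag, while a caller that cannot sleep passes TASK_RUNNING and must handle -ENOSPC.

/* Hypothetical callers, for illustration only. */
static int get_tag_sleeping(struct percpu_ida *pool)
{
	/* May block until another thread frees a tag; will not fail. */
	return percpu_ida_alloc(pool, TASK_UNINTERRUPTIBLE);
}

static int get_tag_atomic(struct percpu_ida *pool)
{
	/* Must not sleep: returns -ENOSPC immediately when no tag is free. */
	return percpu_ida_alloc(pool, TASK_RUNNING);
}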
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu_ida.c	16
1 file changed, 9 insertions, 7 deletions
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..58b671484ac2 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
  *
  * Returns a tag - an integer in the range [0..nr_tags) (passed to
  * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
  *
  * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_UNINTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
  * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_UNINTERRUPTIBLE.
  */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		 *
 		 * global lock held and irqs disabled, don't need percpu lock
 		 */
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (state != TASK_RUNNING)
+			prepare_to_wait(&pool->wait, &wait, state);
 
 		if (!tags->nr_free)
 			alloc_global_tags(pool, tags);
@@ -191,7 +192,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
 		schedule();
@@ -199,8 +200,9 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		local_irq_save(flags);
 		tags = this_cpu_ptr(pool->tag_cpu);
 	}
+	if (state != TASK_RUNNING)
+		finish_wait(&pool->wait, &wait);
 
-	finish_wait(&pool->wait, &wait);
 	return tag;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
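For readers unfamiliar with the wait-queue idiom above, the loop is the standard prepare_to_wait()/schedule()/finish_wait() pattern with the TASK_RUNNING short-circuit this patch adds. The sketch below is a generic illustration of that shape; wq, resource_available() and grab_resource() are placeholders, not code from lib/percpu_ida.c.

/* Generic sketch of the wait pattern used by percpu_ida_alloc(). */
DEFINE_WAIT(wait);
int ret = -ENOSPC;

while (1) {
	/* Only register on the waitqueue if we are allowed to sleep. */
	if (state != TASK_RUNNING)
		prepare_to_wait(&wq, &wait, state);

	if (resource_available()) {
		ret = grab_resource();
		break;
	}

	/* Non-sleeping callers bail out instead of blocking. */
	if (state == TASK_RUNNING)
		break;

	schedule();
}

if (state != TASK_RUNNING)
	finish_wait(&wq, &wait);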