path: root/include/linux/blk-mq.h
author	Jens Axboe <axboe@fb.com>	2014-05-19 09:23:55 -0600
committer	Jens Axboe <axboe@fb.com>	2014-05-19 11:02:47 -0600
commit	1429d7c9467e1e3de0b0ff91d7e4d67c1a92f8a3 (patch)
tree	3b15abb587392becc4ba37d0869d25a4d9420d1d /include/linux/blk-mq.h
parent	e93ecf602beb8439f0bdcc1fa2cbc1f31fdfb8e2 (diff)
blk-mq: switch ctx pending map to the sparser blk_align_bitmap
Each hardware queue has a bitmap of software queues with pending requests. When new IO is queued on a software queue, the bit is set, and when IO is pruned on a hardware queue run, the bit is cleared. This causes a lot of traffic.

Switch this from the regular BITS_PER_LONG bitmap to a sparser layout, similar to what was done for blk-mq tagging. A 20% performance increase was observed for single-threaded IO, and about a 15% performance increase with multiple threads driving the same device.

Signed-off-by: Jens Axboe <axboe@fb.com>
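As background for the diff below, here is a minimal userspace sketch of the sparse layout, assuming a 64-byte cache line. The struct shape mirrors the blk_mq_ctxmap this patch adds, but the align_bitmap padding, the helper names, and the bits_per_word choice are illustrative assumptions, not the kernel implementation:

/* Sketch only: one bitmap word per assumed 64-byte cache line, so CPUs
 * touching different words never contend on the same line. */
struct align_bitmap {
	unsigned long word;
	char pad[64 - sizeof(unsigned long)];
};

struct ctxmap {
	unsigned int map_size;		/* number of padded words in map */
	unsigned int bits_per_word;	/* bits actually used in each word */
	struct align_bitmap *map;
};

/* Hypothetical helper: mark software queue 'ctx' as having pending IO. */
static void ctxmap_set(struct ctxmap *m, unsigned int ctx)
{
	m->map[ctx / m->bits_per_word].word |= 1UL << (ctx % m->bits_per_word);
}

/* Hypothetical helper: clear 'ctx' once a hardware queue run prunes it. */
static void ctxmap_clear(struct ctxmap *m, unsigned int ctx)
{
	m->map[ctx / m->bits_per_word].word &= ~(1UL << (ctx % m->bits_per_word));
}

With a dense BITS_PER_LONG bitmap, 64 software queues share one cache line; here each word gets its own line, trading a little memory for less cross-CPU cache-line traffic.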
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index f83d15f6e1c1..952e558ee598 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -11,6 +11,12 @@ struct blk_mq_cpu_notifier {
 	void (*notify)(void *data, unsigned long action, unsigned int cpu);
 };
 
+struct blk_mq_ctxmap {
+	unsigned int map_size;
+	unsigned int bits_per_word;
+	struct blk_align_bitmap *map;
+};
+
 struct blk_mq_hw_ctx {
 	struct {
 		spinlock_t		lock;
@@ -31,8 +37,8 @@ struct blk_mq_hw_ctx {
 
 	void			*driver_data;
 
-	unsigned int		nr_ctx_map;
-	unsigned long		*ctx_map;
+	struct blk_mq_ctxmap	ctx_map;
+
 	unsigned int		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
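Continuing the sketch above (same hypothetical types and helpers), a small driver illustrates the lifecycle the commit message describes: a bit is set when IO lands on a software queue and cleared when the hardware queue run prunes it. The kernel side would need atomic bitops for this; plain loads and stores keep the illustration short:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct ctxmap m;

	/* Assumed sizing policy: 8 bits per word keeps the map sparse. */
	m.bits_per_word = 8;
	m.map_size = (32 + m.bits_per_word - 1) / m.bits_per_word;
	m.map = calloc(m.map_size, sizeof(*m.map));
	if (!m.map)
		return 1;

	ctxmap_set(&m, 3);	/* IO queued on software queue 3 */
	ctxmap_set(&m, 17);	/* IO queued on software queue 17 */

	/* Hardware queue run: walk the words, service and clear set bits. */
	for (unsigned int w = 0; w < m.map_size; w++) {
		while (m.map[w].word) {
			unsigned int bit = __builtin_ctzl(m.map[w].word);
			unsigned int ctx = w * m.bits_per_word + bit;

			printf("servicing ctx %u\n", ctx);
			ctxmap_clear(&m, ctx);
		}
	}
	free(m.map);
	return 0;
}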