/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
        struct rb_root pending_tree;    /* RB tree of active tgs */
        struct rb_node *first_pending;  /* first node in the tree */
        unsigned int nr_pending;        /* # queued in the tree */
        unsigned long first_pending_disptime;   /* disptime of the first tg */
};

#define THROTL_SERVICE_QUEUE_INITIALIZER	\
        (struct throtl_service_queue){ .pending_tree = RB_ROOT }

enum tg_state_flags {
        THROTL_TG_PENDING = 1 << 0,     /* on parent's pending tree */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
        /* total bytes transferred */
        struct blkg_rwstat service_bytes;
        /* total IOs serviced, post merge */
        struct blkg_rwstat serviced;
};

struct throtl_grp {
        /* must be the first member */
        struct blkg_policy_data pd;

        /* active throtl group service_queue member */
        struct rb_node rb_node;

        /* throtl_data this group belongs to */
        struct throtl_data *td;

        /*
         * Dispatch time in jiffies. This is the estimated time when group
         * will unthrottle and is ready to dispatch more bio. It is used as
         * key to sort active groups in service tree.
         */
        unsigned long disptime;

        unsigned int flags;

        /* Two lists for READ and WRITE */
        struct bio_list bio_lists[2];

        /* Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* bytes per second rate limits */
        uint64_t bps[2];

        /* IOPS limits */
        unsigned int iops[2];

        /* Number of bytes dispatched in current slice */
        uint64_t bytes_disp[2];
        /* Number of bios dispatched in current slice */
        unsigned int io_disp[2];

        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];

        /* Per cpu stats pointer */
        struct tg_stats_cpu __percpu *stats_cpu;

        /* List of tgs waiting for per cpu stats memory to be allocated */
        struct list_head stats_alloc_node;
};

struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        /* number of total undestroyed groups */
        unsigned int nr_undestroyed_grps;

        /* Work for dispatching throttled bios */
        struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
        return blkg_to_tg(td->queue->root_blkg);
}

#define throtl_log_tg(tg, fmt, args...) do {	\
        char __pbuf[128];	\
	\
        blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));	\
        blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
        blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
        static struct tg_stats_cpu *stats_cpu;  /* this fn is non-reentrant */
        struct delayed_work *dwork = to_delayed_work(work);
        bool empty = false;

alloc_stats:
        if (!stats_cpu) {
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&tg_stats_alloc_lock);

        if (!list_empty(&tg_stats_alloc_list)) {
                struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
                                                         struct throtl_grp,
                                                         stats_alloc_node);
                swap(tg->stats_cpu, stats_cpu);
                list_del_init(&tg->stats_alloc_node);
        }

        empty = list_empty(&tg_stats_alloc_list);
        spin_unlock_irq(&tg_stats_alloc_lock);
        if (!empty)
                goto alloc_stats;
}

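/*
 * Set up a newly allocated policy data: clear the rb_node, link back to the
 * owning throtl_data, start with unlimited (-1) bps/iops limits and queue
 * the group for deferred per-cpu stats allocation.
 */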
static void throtl_pd_init(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        unsigned long flags;

        RB_CLEAR_NODE(&tg->rb_node);
        tg->td = blkg->q->td;
        bio_list_init(&tg->bio_lists[0]);
        bio_list_init(&tg->bio_lists[1]);

        tg->bps[READ] = -1;
        tg->bps[WRITE] = -1;
        tg->iops[READ] = -1;
        tg->iops[WRITE] = -1;

        /*
         * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
         * but percpu allocator can't be called from IO path. Queue tg on
         * tg_stats_alloc_list and allocate from work item.
         */
        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
        schedule_delayed_work(&tg_stats_alloc_work, 0);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        unsigned long flags;

        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_del_init(&tg->stats_alloc_node);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

        free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        int cpu;

        if (tg->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                blkg_rwstat_reset(&sc->service_bytes);
                blkg_rwstat_reset(&sc->serviced);
        }
}

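/*
 * Look up the throtl_grp for @blkcg on @td's queue; the root blkcg maps
 * straight to the root tg so the common no-blkcg case avoids a blkg lookup.
 */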
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
                                           struct blkcg *blkcg)
{
        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case
         */
        if (blkcg == &blkcg_root)
                return td_root_tg(td);

        return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                                                  struct blkcg *blkcg)
{
        struct request_queue *q = td->queue;
        struct throtl_grp *tg = NULL;

        /*
         * This is the common case when there are no blkcgs. Avoid lookup
         * in this case
         */
        if (blkcg == &blkcg_root) {
                tg = td_root_tg(td);
        } else {
                struct blkcg_gq *blkg;

                blkg = blkg_lookup_create(blkcg, q);

                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
                else if (!blk_queue_dying(q))
                        tg = td_root_tg(td);
        }

        return tg;
}

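/*
 * Return the tg with the earliest disptime on @sq, caching the result of
 * rb_first() in sq->first_pending so repeated peeks stay cheap.
 */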
static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
{
        /* Service tree is empty */
        if (!sq->nr_pending)
                return NULL;

        if (!sq->first_pending)
                sq->first_pending = rb_first(&sq->pending_tree);

        if (sq->first_pending)
                return rb_entry_tg(sq->first_pending);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
{
        if (sq->first_pending == n)
                sq->first_pending = NULL;
        rb_erase_init(n, &sq->pending_tree);
        --sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(sq);
        if (!tg)
                return;

        sq->first_pending_disptime = tg->disptime;
}

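/*
 * Insert @tg into @sq->pending_tree keyed by tg->disptime, keeping the
 * cached leftmost pointer up to date when the new node sorts first.
 */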
static void tg_service_queue_add(struct throtl_service_queue *sq,
                                 struct throtl_grp *tg)
{
        struct rb_node **node = &sq->pending_tree.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                sq->first_pending = &tg->rb_node;

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color(&tg->rb_node, &sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_service_queue *sq,
                                struct throtl_grp *tg)
{
        tg_service_queue_add(sq, tg);
        tg->flags |= THROTL_TG_PENDING;
        sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_service_queue *sq,
                              struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING))
                __throtl_enqueue_tg(sq, tg);
}

static void __throtl_dequeue_tg(struct throtl_service_queue *sq,
                                struct throtl_grp *tg)
{
        throtl_rb_erase(&tg->rb_node, sq);
        tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_service_queue *sq,
                              struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING)
                __throtl_dequeue_tg(sq, tg);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
                                         unsigned long delay)
{
        struct delayed_work *dwork = &td->dispatch_work;

        mod_delayed_work(kthrotld_workqueue, dwork, delay);
        throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

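/*
 * (Re)arm dispatch_work so it fires when the earliest pending tg is due;
 * fire immediately if that disptime has already passed.
 */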
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
        struct throtl_service_queue *sq = &td->service_queue;

        /* any pending children left? */
        if (!sq->nr_pending)
                return;

        update_min_dispatch_time(sq);

        if (time_before_eq(sq->first_pending_disptime, jiffies))
                throtl_schedule_delayed_work(td, 0);
        else
                throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + throtl_slice;
        throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
                      tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
        throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
                      tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return 0;

        return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then time slices don't get
         * renewed. Don't try to trim the slice if slice is used. A new
         * slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially cgroup limit was very low resulting in high
         * slice_end, but later limit was bumped up and bio was dispatched
         * sooner, then we need to reduce slice_end. A high bogus slice_end
         * is bad because it does not allow new slice to start.
         */
        throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / throtl_slice;

        if (!nr_slices)
                return;
        tmp = tg->bps[rw] * throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * throtl_slice;

        throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

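/*
 * Check whether one more bio fits within the iops limit for the current
 * slice; if not, store the estimated wait in jiffies through @wait.
 */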
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                  unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        u64 tmp;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        /*
         * jiffy_elapsed_rnd should not be a big value as minimum iops can be
         * 1 then at max jiffy elapsed should be equivalent of 1 second as we
         * will allow dispatch after 1 second and after that slice should
         * have been trimmed.
         */

        tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        if (tg->io_disp[rw] + 1 <= io_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /* Calc approx time to dispatch */
        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
        else
                jiffy_wait = 1;

        if (wait)
                *wait = jiffy_wait;
        return 0;
}

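/*
 * Same check for the bytes-per-second limit: does @bio fit in the bytes
 * budget of the current slice, and if not, how long until it would?
 */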
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                 unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = tg->bps[rw] * jiffy_elapsed_rnd;
        do_div(tmp, HZ);
        bytes_allowed = tmp;

        if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        if (wait)
                *wait = jiffy_wait;
        return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
                return 1;
        return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        /*
         * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
         */
        BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
         * long since now.
         */
        if (throtl_slice_used(tg, rw))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
                        throtl_extend_slice(tg, rw, jiffies + throtl_slice);
        }

        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
                                         int rw)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        struct tg_stats_cpu *stats_cpu;
        unsigned long flags;

        /* If per cpu stats are not allocated yet, don't do any accounting. */
        if (tg->stats_cpu == NULL)
                return;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(tg->stats_cpu);

        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

        local_irq_restore(flags);
}

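/* Account a dispatched bio against the group's slice and per-cpu stats. */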
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
        tg->bytes_disp[rw] += bio->bi_size;
        tg->io_disp[rw]++;

        throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct throtl_service_queue *sq,
                              struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);

        bio_list_add(&tg->bio_lists[rw], bio);
        /* Take a bio reference on tg */
        blkg_get(tg_to_blkg(tg));
        tg->nr_queued[rw]++;
        tg->td->nr_queued[rw]++;
        throtl_enqueue_tg(sq, tg);
}

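/*
 * Recompute @tg's dispatch time from the wait times of its first queued
 * READ and WRITE bios and requeue it at the new position in @sq.
 */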
static void tg_update_disptime(struct throtl_service_queue *sq,
                               struct throtl_grp *tg)
{
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        if ((bio = bio_list_peek(&tg->bio_lists[READ])))
                tg_may_dispatch(tg, bio, &read_wait);

        if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_dequeue_tg(sq, tg);
        tg->disptime = disptime;
        throtl_enqueue_tg(sq, tg);
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
                                struct bio_list *bl)
{
        struct bio *bio;

        bio = bio_list_pop(&tg->bio_lists[rw]);
        tg->nr_queued[rw]--;
        /* Drop bio reference on blkg */
        blkg_put(tg_to_blkg(tg));

        BUG_ON(tg->td->nr_queued[rw] <= 0);
        tg->td->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);
        bio_list_add(bl, bio);
        bio->bi_rw |= REQ_THROTTLED;

        throtl_trim_slice(tg, rw);
}

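/*
 * Move as many bios as the group's limits currently allow from @tg onto
 * @bl, keeping roughly a 3:1 READ:WRITE split per round.
 */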
static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
{
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}

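/*
 * Walk the pending tree in disptime order and dispatch from every tg that
 * is due, stopping after throtl_quantum bios per round.
 */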
static int throtl_select_dispatch(struct throtl_service_queue *sq,
                                  struct bio_list *bl)
{
        unsigned int nr_disp = 0;
        struct throtl_grp *tg;

        while (1) {
                tg = throtl_rb_first(sq);

                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                throtl_dequeue_tg(sq, tg);

                nr_disp += throtl_dispatch_tg(tg, bl);

                if (tg->nr_queued[0] || tg->nr_queued[1])
                        tg_update_disptime(sq, tg);

                if (nr_disp >= throtl_quantum)
                        break;
        }

        return nr_disp;
}

/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(to_delayed_work(work),
                                              struct throtl_data, dispatch_work);
        struct request_queue *q = td->queue;
        unsigned int nr_disp = 0;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;

        spin_lock_irq(q->queue_lock);

        bio_list_init(&bio_list_on_stack);

        throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
                   td->nr_queued[READ] + td->nr_queued[WRITE],
                   td->nr_queued[READ], td->nr_queued[WRITE]);

        nr_disp = throtl_select_dispatch(&td->service_queue, &bio_list_on_stack);

        if (nr_disp)
                throtl_log(td, "bios disp=%u", nr_disp);

        throtl_schedule_next_dispatch(td);

        spin_unlock_irq(q->queue_lock);

        /*
         * If we dispatched some requests, unplug the queue to ensure
         * immediate dispatch
         */
        if (nr_disp) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        generic_make_request(bio);
                blk_finish_plug(&plug);
        }
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
                                struct blkg_policy_data *pd, int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                tmp = blkg_rwstat_read((void *)sc + off);
                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        rwstat.cnt[i] += tmp.cnt[i];
        }

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
                               struct seq_file *sf)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

        blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
                          cft->private, true);
        return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == -1)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
                              struct seq_file *sf)
{
        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
}

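/*
 * Parse and apply a new bps/iops limit written to a cgroup file, then
 * restart the slices and reschedule dispatch so the new limit takes effect
 * immediately.
 */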
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
                       bool is_u64)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        struct throtl_data *td;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);
        td = ctx.blkg->q->td;

        if (!ctx.v)
                ctx.v = -1;

        if (is_u64)
                *(u64 *)((void *)tg + cft->private) = ctx.v;
        else
                *(unsigned int *)((void *)tg + cft->private) = ctx.v;

        throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                      tg->bps[READ], tg->bps[WRITE],
                      tg->iops[READ], tg->iops[WRITE]);

        /*
         * We're already holding queue_lock and know @tg is valid. Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITES. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO with new low rate.
         */
        throtl_start_new_slice(tg, 0);
        throtl_start_new_slice(tg, 1);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(&td->service_queue, tg);
                throtl_schedule_next_dispatch(td);
        }

        blkg_conf_finish(&ctx);
        return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
                           const char *buf)
{
        return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
                            const char *buf)
{
        return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE]),
                .read_seq_string = tg_print_conf_u64,
                .write_string = tg_set_conf_u64,
                .max_write_len = 256,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE]),
                .read_seq_string = tg_print_conf_uint,
                .write_string = tg_set_conf_uint,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = offsetof(struct tg_stats_cpu, service_bytes),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        {
                .name = "throttle.io_serviced",
                .private = offsetof(struct tg_stats_cpu, serviced),
                .read_seq_string = tg_print_cpu_rwstat,
        },
        { }     /* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_delayed_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
        .pd_size = sizeof(struct throtl_grp),
        .cftypes = throtl_files,

        .pd_init_fn = throtl_pd_init,
        .pd_exit_fn = throtl_pd_exit,
        .pd_reset_stats_fn = throtl_pd_reset_stats,
};

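/*
 * Main entry from the make_request path: charge @bio against its group's
 * limits and let it pass, or queue it on the group and return true to
 * indicate it was throttled.
 */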
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
        struct throtl_data *td = q->td;
        struct throtl_grp *tg;
        bool rw = bio_data_dir(bio), update_disptime = true;
        struct blkcg *blkcg;
        bool throttled = false;

        if (bio->bi_rw & REQ_THROTTLED) {
                bio->bi_rw &= ~REQ_THROTTLED;
                goto out;
        }

        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates. If a group has no rules,
         * just update the dispatch stats in lockless manner and return.
         */
        rcu_read_lock();
        blkcg = bio_blkcg(bio);
        tg = throtl_lookup_tg(td, blkcg);
        if (tg) {
                if (tg_no_rule_group(tg, rw)) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
                                                     bio->bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }

        /*
         * Either group has not been allocated yet or it is not an unlimited
         * IO group
         */
        spin_lock_irq(q->queue_lock);
        tg = throtl_lookup_create_tg(td, blkcg);
        if (unlikely(!tg))
                goto out_unlock;

        if (tg->nr_queued[rw]) {
                /*
                 * There is already another bio queued in same dir. No
                 * need to update dispatch time.
                 */
                update_disptime = false;
                goto queue_bio;
        }

        /* Bio is within rate limit of group */
        if (tg_may_dispatch(tg, bio, NULL)) {
                throtl_charge_bio(tg, bio);

                /*
                 * We need to trim slice even when bios are not being queued
                 * otherwise it might happen that a bio is not queued for
                 * a long time and slice keeps on extending and trim is not
                 * called for a long time. Now if limits are reduced suddenly
                 * we take into account all the IO dispatched so far at new
                 * low rate and newly queued IO gets a really long dispatch
                 * time.
                 *
                 * So keep on trimming slice even if bio is not queued.
                 */
                throtl_trim_slice(tg, rw);
                goto out_unlock;
        }

queue_bio:
        throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
                        tg->io_disp[rw], tg->iops[rw],
                        tg->nr_queued[READ], tg->nr_queued[WRITE]);

        bio_associate_current(bio);
        throtl_add_bio_tg(&q->td->service_queue, tg, bio);
        throttled = true;

        if (update_disptime) {
                tg_update_disptime(&td->service_queue, tg);
                throtl_schedule_next_dispatch(td);
        }

out_unlock:
        spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
        rcu_read_unlock();
out:
        return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct throtl_data *td = q->td;
        struct throtl_service_queue *sq = &td->service_queue;
        struct throtl_grp *tg;
        struct bio_list bl;
        struct bio *bio;

        queue_lockdep_assert_held(q);

        bio_list_init(&bl);

        while ((tg = throtl_rb_first(sq))) {
                throtl_dequeue_tg(sq, tg);

                while ((bio = bio_list_peek(&tg->bio_lists[READ])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
                while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
                        tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
        }
        spin_unlock_irq(q->queue_lock);

        while ((bio = bio_list_pop(&bl)))
                generic_make_request(bio);

        spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
        struct throtl_data *td;
        int ret;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
        if (!td)
                return -ENOMEM;

        td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
        INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);

        q->td = td;
        td->queue = q;

        /* activate policy */
        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
        if (ret)
                kfree(td);
        return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
        BUG_ON(!q->td);
        throtl_shutdown_wq(q);
        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
        kfree(q->td);
}

static int __init throtl_init(void)
{
        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");

        return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);