/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

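/*
 * A minimal usage sketch (comment-only, not compiled): lookups nest under
 * the RCU read lock, and the returned blkg is only stable inside that
 * critical section unless a reference is taken with blkg_get().  @plid
 * below stands for some registered policy's ->plid.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		pd = blkg->pd[plid];	/- per-policy data, may be NULL -/
 *	rcu_read_unlock();
 */
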
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  This function should be called under RCU read lock and
 * @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	return blkg_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

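/*
 * A hedged usage sketch (comment-only): creation requires both the RCU
 * read lock and @q->queue_lock, and callers must cope with ERR_PTR()
 * returns while @q is bypassing or dying.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (!IS_ERR(blkg))
 *		... use blkg while the locks are held ...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */
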
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * The root blkg is destroyed too.  Just clear the pointers since
	 * root_rl does not take a reference on the root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next-entry function used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

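/*
 * __blk_queue_next_rl() is normally reached through the
 * blk_queue_for_each_rl() iterator from the blkcg header.  A minimal
 * sketch (comment-only) walking every request_list of a queue, root_rl
 * first and then one per blkg:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		... e.g. wake up waiters on rl ...
 */
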
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with a "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

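/*
 * Hypothetical sketch (comment-only) of how a policy would build a
 * read_seq_string method from this helper and one of the prfill callbacks
 * below; "pol_policy" and "struct pol_pd" are made-up names:
 *
 *	static int pol_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &pol_policy,
 *				  offsetof(struct pol_pd, stat), true);
 *		return 0;
 *	}
 */
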
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

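/*
 * A hedged sketch (comment-only) of a config write method bracketed by
 * the prep/finish pair above; "pol_policy" and the pd layout are
 * hypothetical:
 *
 *	static int pol_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				 const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &pol_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		... apply ctx.v to ctx.blkg's policy data ...
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */
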
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg, *new_blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code it.
	 */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup(&blkcg_root, q, false);
	if (blkg)
		blkg_free(new_blkg);
	else
		blkg = blkg_create(&blkcg_root, q, new_blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

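/*
 * Illustrative call sequence (comment-only, hypothetical policy):
 * activation is done from a sleepable init path of whatever enables the
 * policy on @q, and paired with deactivation on teardown:
 *
 *	ret = blkcg_activate_policy(q, &pol_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &pol_policy);
 */
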
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

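/*
 * A hedged module init/exit sketch (comment-only) for a hypothetical
 * policy; note that ->pd_size must cover struct blkg_policy_data and
 * ->cftypes, if set, is added to the blkio subsystem on registration:
 *
 *	static struct blkcg_policy pol_policy = {
 *		.pd_size	= sizeof(struct pol_pd),
 *		.cftypes	= pol_files,
 *		.pd_init_fn	= pol_pd_init,
 *		.pd_exit_fn	= pol_pd_exit,
 *	};
 *
 *	static int __init pol_init(void)
 *	{
 *		return blkcg_policy_register(&pol_policy);
 *	}
 *
 *	static void __exit pol_exit(void)
 *	{
 *		blkcg_policy_unregister(&pol_policy);
 *	}
 */
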
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);