blob: c79a226cc25c2f1c39c640356d77c11ea1a87578 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
Patrick McHardy41794772007-03-16 01:19:15 -070029#include <linux/hrtimer.h>
Jarek Poplawski25bfcd52008-08-18 20:53:34 -070030#include <linux/lockdep.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020033#include <net/net_namespace.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110034#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070035#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <net/pkt_sched.h>
37
Tom Goff7316ae82010-03-19 15:40:13 +000038static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 struct Qdisc *old, struct Qdisc *new);
Tom Goff7316ae82010-03-19 15:40:13 +000041static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 device is ready to send something) in order and at times
58 determined by algorithm hidden in it.
59
60 qdisc's are divided to two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets to "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as rule, queues)
66 attached to them etc. etc. etc.
67
68 The goal of the routines in this file is to translate
69 information supplied by user in the form of handles
70 to more intelligible for kernel form, to make some sanity
71 checks and part of work, which is common to all qdiscs
72 and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but it does not mean that queue is empty, it just means that
84 discipline does not want to send anything this time.
85 Queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues q->q is not
87 real packet queue, but however q->q.qlen must be valid.
88
89 ---enqueue
90
91 enqueue returns 0, if packet was enqueued successfully.
92 If packet (this one or another one) was dropped, it returns
93 not zero error code.
94 NET_XMIT_DROP - this packet dropped
95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100
101 Auxiliary routines:
102
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
Zhi Yong Wu21eb2182014-01-01 04:34:51 +0800138/* Register/unregister queueing discipline */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
/*
 * register_qdisc - add a qdisc type to the global registry
 * @qops: ops table describing the new discipline
 *
 * Rejects duplicate ids with -EEXIST and inconsistent ops with -EINVAL.
 * Missing enqueue/peek/dequeue hooks are filled in from noop_qdisc_ops,
 * except that supplying dequeue without peek is refused (a default peek
 * cannot be synthesized for a real dequeue).  Returns 0 on success.
 */
int register_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int rc = -EEXIST;

        write_lock(&qdisc_mod_lock);
        /* Walk to the list tail, failing if the id is already taken. */
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (!strcmp(qops->id, q->id))
                        goto out;

        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
                if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
                else
                        goto out_einval;
        }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;

        if (qops->cl_ops) {
                const struct Qdisc_class_ops *cops = qops->cl_ops;

                /* Classful qdiscs must provide the basic class hooks. */
                if (!(cops->get && cops->put && cops->walk && cops->leaf))
                        goto out_einval;

                /* A filter chain requires the bind/unbind pair as well. */
                if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
                        goto out_einval;
        }

        /* qp points at the tail's next pointer: append the new ops. */
        qops->next = NULL;
        *qp = qops;
        rc = 0;
out:
        write_unlock(&qdisc_mod_lock);
        return rc;

out_einval:
        rc = -EINVAL;
        goto out;
}
EXPORT_SYMBOL(register_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800201EXPORT_SYMBOL(unregister_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
stephen hemminger6da7c8f2013-08-27 16:19:08 -0700203/* Get default qdisc if not otherwise specified */
/*
 * qdisc_get_default - copy the current default qdisc's id into @name
 * @name: destination buffer
 * @len:  size of @name; copy is bounded and NUL-terminated by strlcpy
 *
 * Reads default_qdisc_ops under qdisc_mod_lock so the id cannot change
 * mid-copy.
 */
void qdisc_get_default(char *name, size_t len)
{
        read_lock(&qdisc_mod_lock);
        strlcpy(name, default_qdisc_ops->id, len);
        read_unlock(&qdisc_mod_lock);
}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
226/* Set new default qdisc to use */
/*
 * qdisc_set_default - select a new default qdisc by name
 * @name: qdisc id, e.g. "pfifo_fast"
 *
 * Requires CAP_NET_ADMIN.  If the ops is not yet registered the lock is
 * dropped so request_module("sch_<name>") can run, then the lookup is
 * retried.  On success the previous default's module reference is
 * released and the new ops (pinned by qdisc_lookup_default) installed.
 * Returns 0 on success, -EPERM or -ENOENT on failure.
 */
int qdisc_set_default(const char *name)
{
        const struct Qdisc_ops *ops;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        write_lock(&qdisc_mod_lock);
        ops = qdisc_lookup_default(name);
        if (!ops) {
                /* Not found: drop the lock and try to load the module.
                 * request_module may sleep, so it cannot run under the
                 * write lock. */
                write_unlock(&qdisc_mod_lock);
                request_module("sch_%s", name);
                write_lock(&qdisc_mod_lock);

                ops = qdisc_lookup_default(name);
        }

        if (ops) {
                /* Swap in the new default; drop the old module's ref. */
                module_put(default_qdisc_ops->owner);
                default_qdisc_ops = ops;
        }
        write_unlock(&qdisc_mod_lock);

        return ops ? 0 : -ENOENT;
}
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255/* We know handle. Find qdisc among all qdisc's attached to device
256 (root qdisc, all its children, children of children etc.)
257 */
258
Hannes Eder6113b742008-11-28 03:06:46 -0800259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
David S. Miller8123b422008-08-08 23:23:39 -0700260{
261 struct Qdisc *q;
262
263 if (!(root->flags & TCQ_F_BUILTIN) &&
264 root->handle == handle)
265 return root;
266
267 list_for_each_entry(q, &root->list, list) {
268 if (q->handle == handle)
269 return q;
270 }
271 return NULL;
272}
273
/*
 * qdisc_list_add - link @q onto its device root qdisc's list
 * @q: qdisc being added
 *
 * Root and ingress qdiscs are not listed; everything else is appended
 * to the device root's list so qdisc_match_from_root() can find it.
 */
void qdisc_list_add(struct Qdisc *q)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                struct Qdisc *root = qdisc_dev(q)->qdisc;

                /* The device root must be a real qdisc by this point. */
                WARN_ON_ONCE(root == &noop_qdisc);
                list_add_tail(&q->list, &root->list);
        }
}
EXPORT_SYMBOL(qdisc_list_add);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700284
/*
 * qdisc_list_del - undo qdisc_list_add for @q
 * @q: qdisc being removed
 *
 * Mirrors the filter in qdisc_list_add: only non-root, non-ingress
 * qdiscs were ever linked, so only those are unlinked.
 */
void qdisc_list_del(struct Qdisc *q)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
                list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);
291
David S. Milleread81cc2008-07-17 00:50:32 -0700292struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
Patrick McHardy43effa12006-11-29 17:35:48 -0800293{
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700294 struct Qdisc *q;
295
Patrick McHardyaf356af2009-09-04 06:41:18 +0000296 q = qdisc_match_from_root(dev->qdisc, handle);
297 if (q)
298 goto out;
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700299
Eric Dumazet24824a02010-10-02 06:11:55 +0000300 if (dev_ingress_queue(dev))
301 q = qdisc_match_from_root(
302 dev_ingress_queue(dev)->qdisc_sleeping,
303 handle);
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800304out:
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700305 return q;
Patrick McHardy43effa12006-11-29 17:35:48 -0800306}
307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
309{
310 unsigned long cl;
311 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800312 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
314 if (cops == NULL)
315 return NULL;
316 cl = cops->get(p, classid);
317
318 if (cl == 0)
319 return NULL;
320 leaf = cops->leaf(p, cl);
321 cops->put(p, cl);
322 return leaf;
323}
324
325/* Find queueing discipline by name */
326
Patrick McHardy1e904742008-01-22 22:11:17 -0800327static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328{
329 struct Qdisc_ops *q = NULL;
330
331 if (kind) {
332 read_lock(&qdisc_mod_lock);
333 for (q = qdisc_base; q; q = q->next) {
Patrick McHardy1e904742008-01-22 22:11:17 -0800334 if (nla_strcmp(kind, q->id) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (!try_module_get(q->owner))
336 q = NULL;
337 break;
338 }
339 }
340 read_unlock(&qdisc_mod_lock);
341 }
342 return q;
343}
344
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +0200345/* The linklayer setting were not transferred from iproute2, in older
346 * versions, and the rate tables lookup systems have been dropped in
347 * the kernel. To keep backward compatible with older iproute2 tc
348 * utils, we detect the linklayer setting by detecting if the rate
349 * table were modified.
350 *
351 * For linklayer ATM table entries, the rate table will be aligned to
352 * 48 bytes, thus some table entries will contain the same value. The
353 * mpu (min packet unit) is also encoded into the old rate table, thus
354 * starting from the mpu, we find low and high table entries for
355 * mapping this cell. If these entries contain the same value, when
356 * the rate tables have been modified for linklayer ATM.
357 *
358 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
359 * and then roundup to the next cell, calc the table entry one below,
360 * and compare.
361 */
/*
 * __detect_linklayer - heuristically classify an old-style rate table
 * @r:    rate spec supplied by userspace
 * @rtab: 256-entry rate table from the same netlink message
 *
 * See the comment block above: ATM-modified tables are 48-byte aligned,
 * so two table cells straddling one 48-byte boundary hold equal values.
 * Returns TC_LINKLAYER_ATM or TC_LINKLAYER_ETHERNET.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
        /* Round mpu up to the containing 48-byte cell and the next one. */
        int low = roundup(r->mpu, 48);
        int high = roundup(low+1, 48);
        int cell_low = low >> r->cell_log;
        int cell_high = (high >> r->cell_log) - 1;

        /* rtab is too inaccurate at rates > 100Mbit/s */
        if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
                pr_debug("TC linklayer: Giving up ATM detection\n");
                return TC_LINKLAYER_ETHERNET;
        }

        if ((cell_high > cell_low) && (cell_high < 256)
            && (rtab[cell_low] == rtab[cell_high])) {
                pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
                         cell_low, cell_high, rtab[cell_high]);
                return TC_LINKLAYER_ATM;
        }
        return TC_LINKLAYER_ETHERNET;
}
383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static struct qdisc_rate_table *qdisc_rtab_list;
385
/*
 * qdisc_get_rtab - get a shared, refcounted rate table
 * @r:   rate spec from userspace
 * @tab: TC_RTAB_SIZE blob of 256 u32 entries (1024 bytes)
 *
 * Identical (spec + data) tables are shared via qdisc_rtab_list with a
 * reference count; otherwise a new entry is allocated and pushed onto
 * the list head.  Returns NULL on invalid input or allocation failure.
 */
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
        struct qdisc_rate_table *rtab;

        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
            nla_len(tab) != TC_RTAB_SIZE)
                return NULL;

        /* Reuse an existing table when spec and data match exactly. */
        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
                    !memcmp(&rtab->data, nla_data(tab), 1024)) {
                        rtab->refcnt++;
                        return rtab;
                }
        }

        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
                /* Old iproute2 did not set linklayer; detect it from the
                 * table contents (see __detect_linklayer). */
                if (r->linklayer == TC_LINKLAYER_UNAWARE)
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        }
        return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415
416void qdisc_put_rtab(struct qdisc_rate_table *tab)
417{
418 struct qdisc_rate_table *rtab, **rtabp;
419
420 if (!tab || --tab->refcnt)
421 return;
422
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000423 for (rtabp = &qdisc_rtab_list;
424 (rtab = *rtabp) != NULL;
425 rtabp = &rtab->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (rtab == tab) {
427 *rtabp = rtab->next;
428 kfree(rtab);
429 return;
430 }
431 }
432}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800433EXPORT_SYMBOL(qdisc_put_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700435static LIST_HEAD(qdisc_stab_list);
436static DEFINE_SPINLOCK(qdisc_stab_lock);
437
438static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
439 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
440 [TCA_STAB_DATA] = { .type = NLA_BINARY },
441};
442
/*
 * qdisc_get_stab - parse and get a shared, refcounted size table
 * @opt: nested TCA_STAB attribute
 *
 * Validates TCA_STAB_BASE/TCA_STAB_DATA, reuses an identical table from
 * qdisc_stab_list when possible, otherwise allocates a new one (outside
 * the spinlock, since kmalloc may sleep with GFP_KERNEL) and adds it to
 * the list.  Returns the table or an ERR_PTR.
 */
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
        struct tc_sizespec *s;
        unsigned int tsize = 0;
        u16 *tab = NULL;
        int err;

        err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
        if (err < 0)
                return ERR_PTR(err);
        if (!tb[TCA_STAB_BASE])
                return ERR_PTR(-EINVAL);

        s = nla_data(tb[TCA_STAB_BASE]);

        if (s->tsize > 0) {
                /* A non-empty table requires the data attribute. */
                if (!tb[TCA_STAB_DATA])
                        return ERR_PTR(-EINVAL);
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
        }

        /* The declared size must match the attribute payload. */
        if (tsize != s->tsize || (!tab && tsize > 0))
                return ERR_PTR(-EINVAL);

        spin_lock(&qdisc_stab_lock);

        /* Share an existing table with identical spec and data. */
        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
                        continue;
                if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                        continue;
                stab->refcnt++;
                spin_unlock(&qdisc_stab_lock);
                return stab;
        }

        spin_unlock(&qdisc_stab_lock);

        /* Allocate outside the lock; stab has a flexible data[] tail. */
        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);

        stab->refcnt = 1;
        stab->szopts = *s;
        if (tsize > 0)
                memcpy(stab->data, tab, tsize * sizeof(u16));

        spin_lock(&qdisc_stab_lock);
        list_add_tail(&stab->list, &qdisc_stab_list);
        spin_unlock(&qdisc_stab_lock);

        return stab;
}
499
/* RCU callback: free the size table embedding this rcu_head. */
static void stab_kfree_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct qdisc_size_table, rcu));
}
504
/*
 * qdisc_put_stab - drop one reference on a shared size table
 * @tab: table from qdisc_get_stab (NULL is a no-op)
 *
 * On the last reference the table is unlinked under qdisc_stab_lock and
 * freed after an RCU-bh grace period, so concurrent readers of the list
 * or table data are not torn down underneath.
 */
void qdisc_put_stab(struct qdisc_size_table *tab)
{
        if (!tab)
                return;

        spin_lock(&qdisc_stab_lock);

        if (--tab->refcnt == 0) {
                list_del(&tab->list);
                /* Defer the free past in-flight RCU-bh readers. */
                call_rcu_bh(&tab->rcu, stab_kfree_rcu);
        }

        spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
520
/*
 * qdisc_dump_stab - emit a TCA_STAB nest describing @stab into @skb
 *
 * Only the szopts header is dumped, not the table data.  Returns the
 * skb length on success or -1 if the attribute does not fit.
 */
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_STAB);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        return skb->len;

nla_put_failure:
        return -1;
}
537
/*
 * __qdisc_calculate_pkt_len - compute the stab-adjusted packet length
 * @skb:  packet whose cb->pkt_len is set
 * @stab: size table controlling the mapping
 *
 * Starts from skb->len plus the configured overhead, maps it through
 * the cell-aligned, cell_log-scaled lookup table (extrapolating
 * linearly past the last entry), scales by size_log, clamps to >= 1,
 * and stores the result in qdisc_skb_cb(skb)->pkt_len.
 */
void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
        int pkt_len, slot;

        pkt_len = skb->len + stab->szopts.overhead;
        if (unlikely(!stab->szopts.tsize))
                goto out;

        /* Pick the table slot: align, clamp at 0, scale by cell_log. */
        slot = pkt_len + stab->szopts.cell_align;
        if (unlikely(slot < 0))
                slot = 0;

        slot >>= stab->szopts.cell_log;
        if (likely(slot < stab->szopts.tsize))
                pkt_len = stab->data[slot];
        else
                /* Beyond the table: extrapolate from the last entry plus
                 * the wrapped remainder slot. */
                pkt_len = stab->data[stab->szopts.tsize - 1] *
                                (slot / stab->szopts.tsize) +
                                stab->data[slot % stab->szopts.tsize];

        pkt_len <<= stab->szopts.size_log;
out:
        /* Never report a non-positive length. */
        if (unlikely(pkt_len < 1))
                pkt_len = 1;
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700565
/*
 * qdisc_warn_nonwc - warn once per qdisc about non-work-conserving use
 * @txt:   caller-supplied context string for the log message
 * @qdisc: offending qdisc
 *
 * TCQ_F_WARN_NONWC latches after the first warning so the message is
 * printed at most once per qdisc instance.
 */
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
                pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
                        txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
        }
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
575
/*
 * qdisc_watchdog - hrtimer callback for qdisc watchdog timers
 *
 * Clears the qdisc's throttled state and reschedules its root qdisc so
 * transmission resumes.  The rcu read section brackets the access to
 * the qdisc hierarchy.  Never rearms itself (HRTIMER_NORESTART).
 */
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);

        rcu_read_lock();
        qdisc_unthrottled(wd->qdisc);
        __netif_schedule(qdisc_root(wd->qdisc));
        rcu_read_unlock();

        return HRTIMER_NORESTART;
}
588
/*
 * qdisc_watchdog_init - prepare a watchdog for @qdisc
 *
 * Sets up a CPU-pinned, absolute-time monotonic hrtimer that fires
 * qdisc_watchdog() and records the owning qdisc.
 */
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
        hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
596
/*
 * qdisc_watchdog_schedule_ns - arm the watchdog to fire at @expires (ns)
 *
 * Does nothing while the root qdisc is being deactivated, so a timer is
 * not armed on a dying hierarchy.  Marks the qdisc throttled before
 * starting the pinned absolute-time hrtimer.
 */
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;

        qdisc_throttled(wd->qdisc);

        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
                      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
Patrick McHardy41794772007-03-16 01:19:15 -0700610
/*
 * qdisc_watchdog_cancel - stop a pending watchdog and unthrottle
 *
 * hrtimer_cancel waits for a running callback to finish before
 * returning, after which the throttled flag is cleared.
 */
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
        hrtimer_cancel(&wd->timer);
        qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
Adrian Bunka94f7792008-07-22 14:20:11 -0700618static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700619{
620 unsigned int size = n * sizeof(struct hlist_head), i;
621 struct hlist_head *h;
622
623 if (size <= PAGE_SIZE)
624 h = kmalloc(size, GFP_KERNEL);
625 else
626 h = (struct hlist_head *)
627 __get_free_pages(GFP_KERNEL, get_order(size));
628
629 if (h != NULL) {
630 for (i = 0; i < n; i++)
631 INIT_HLIST_HEAD(&h[i]);
632 }
633 return h;
634}
635
636static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
637{
638 unsigned int size = n * sizeof(struct hlist_head);
639
640 if (size <= PAGE_SIZE)
641 kfree(h);
642 else
643 free_pages((unsigned long)h, get_order(size));
644}
645
/*
 * qdisc_class_hash_grow - double the class hash when load exceeds 0.75
 * @sch:    qdisc owning the hash (provides the tree lock)
 * @clhash: hash to (possibly) grow
 *
 * The new table is allocated before taking sch_tree_lock; rehashing and
 * the pointer swap happen under the lock so lookups never see a
 * half-built table.  Allocation failure simply leaves the old table in
 * place.
 */
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;
        unsigned int i, h;

        /* Rehash when load factor exceeds 0.75 */
        if (clhash->hashelems * 4 <= clhash->hashsize * 3)
                return;
        nsize = clhash->hashsize * 2;
        nmask = nsize - 1;
        nhash = qdisc_class_hash_alloc(nsize);
        if (nhash == NULL)
                return;

        ohash = clhash->hash;
        osize = clhash->hashsize;

        sch_tree_lock(sch);
        /* _safe variant: hlist_add_head unlinks entries as we move them. */
        for (i = 0; i < osize; i++) {
                hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
                }
        }
        clhash->hash = nhash;
        clhash->hashsize = nsize;
        clhash->hashmask = nmask;
        sch_tree_unlock(sch);

        qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
681
/*
 * qdisc_class_hash_init - set up an empty 4-bucket class hash
 *
 * Returns 0 on success or -ENOMEM if the bucket array cannot be
 * allocated.  The table grows later via qdisc_class_hash_grow.
 */
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
        unsigned int size = 4;

        clhash->hash = qdisc_class_hash_alloc(size);
        if (clhash->hash == NULL)
                return -ENOMEM;
        clhash->hashsize = size;
        clhash->hashmask = size - 1;
        clhash->hashelems = 0;
        return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);
695
/* Free the bucket array; entries themselves are owned by the caller. */
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
        qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
701
/*
 * qdisc_class_hash_insert - add @cl to @clhash, keyed by cl->classid
 *
 * Caller is responsible for locking and for ensuring the classid is
 * not already present.
 */
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        unsigned int h;

        INIT_HLIST_NODE(&cl->hnode);
        h = qdisc_class_hash(cl->classid, clhash->hashmask);
        hlist_add_head(&cl->hnode, &clhash->hash[h]);
        clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);
713
/* Unlink @cl from its bucket and decrement the element count. */
void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        hlist_del(&cl->hnode);
        clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
721
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000722/* Allocate an unique handle from space managed by kernel
723 * Possible range is [8000-FFFF]:0000 (0x8000 values)
724 */
/*
 * qdisc_alloc_handle - allocate an unused automatic qdisc handle
 * @dev: device the handle must be unique on
 *
 * Probes up to 0x8000 candidates starting from a static rolling
 * counter (major range 8000-FFFF, wrapping past TC_H_ROOT), returning
 * the first handle not found by qdisc_lookup, or 0 if the space is
 * exhausted.  cond_resched keeps long scans preemptible.
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
        int i = 0x8000;
        /* Rolling counter shared across calls; only the major part moves. */
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

        do {
                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
                if (!qdisc_lookup(dev, autohandle))
                        return autohandle;
                cond_resched();
        } while (--i > 0);

        return 0;
}
741
/*
 * qdisc_tree_decrease_qlen - propagate a qlen decrease up the tree
 * @sch: qdisc whose queue shrank
 * @n:   number of packets removed
 *
 * Walks from @sch's parent to the root, notifying each classful parent
 * (qlen_notify) so it can deactivate now-empty classes, decreasing each
 * ancestor's qlen by @n and charging @n drops to its stats.  Stops at
 * ingress parents and at the root.
 */
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
        int drops;

        if (n == 0)
                return;
        drops = max_t(int, n, 0);
        while ((parentid = sch->parent)) {
                /* Ingress hierarchy is not accounted here. */
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        return;

                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        /* Only the root may legitimately have no parent qdisc. */
                        WARN_ON(parentid != TC_H_ROOT);
                        return;
                }
                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
                        cl = cops->get(sch, parentid);
                        cops->qlen_notify(sch, cl);
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
                __qdisc_qstats_drop(sch, drops);
        }
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772
/*
 * notify_and_destroy - send a netlink change notification, then free @old
 *
 * Notification is skipped when both @old and @new are NULL; @old (if
 * any) is destroyed only after the notification has been emitted.
 */
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
                               struct Qdisc *old, struct Qdisc *new)
{
        if (new || old)
                qdisc_notify(net, skb, n, clid, old, new);

        if (old)
                qdisc_destroy(old);
}
783
784/* Graft qdisc "new" to class "classid" of qdisc "parent" or
785 * to device "dev".
786 *
787 * When appropriate send a netlink notification using 'skb'
788 * and "n".
789 *
790 * On success, destroy old qdisc.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 */
792
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
                       struct Qdisc *new, struct Qdisc *old)
{
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);
        int err = 0;

        if (parent == NULL) {
                /* Grafting at device level: either the single ingress
                 * queue or every TX queue gets @new. */
                unsigned int i, num_q, ingress;

                ingress = 0;
                num_q = dev->num_tx_queues;
                if ((q && q->flags & TCQ_F_INGRESS) ||
                    (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
                        if (!dev_ingress_queue(dev))
                                return -ENOENT;
                }

                /* Quiesce the device while queues are being swapped. */
                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);

                /* Qdiscs with their own attach hook (e.g. multiqueue-
                 * aware ones) distribute themselves; skip the per-queue
                 * loop entirely. */
                if (new && new->ops->attach) {
                        new->ops->attach(new);
                        num_q = 0;
                }

                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);

                        if (!ingress)
                                dev_queue = netdev_get_tx_queue(dev, i);

                        old = dev_graft_qdisc(dev_queue, new);
                        /* One reference per additional queue sharing @new. */
                        if (new && i > 0)
                                atomic_inc(&new->refcnt);

                        if (!ingress)
                                qdisc_destroy(old);
                }

                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
                                atomic_inc(&new->refcnt);
                        /* NULL @new means "revert to noop". */
                        dev->qdisc = new ? : &noop_qdisc;
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }

                if (dev->flags & IFF_UP)
                        dev_activate(dev);
        } else {
                /* Grafting into a class of @parent: delegate to the
                 * parent's class ops. */
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

                err = -EOPNOTSUPP;
                if (cops && cops->graft) {
                        unsigned long cl = cops->get(parent, classid);
                        if (cl) {
                                /* cops->graft returns the displaced qdisc in @old. */
                                err = cops->graft(parent, cl, new, &old);
                                cops->put(parent, cl);
                        } else
                                err = -ENOENT;
                }
                if (!err)
                        notify_and_destroy(net, skb, n, classid, old, new);
        }
        return err;
}
865
/* lockdep annotation is needed for ingress; egress gets it only for name */
/* qdisc_create() assigns qdisc_rx_lock to ingress qdiscs and qdisc_tx_lock
 * to all others, so lockdep can distinguish the two locking patterns.
 */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
869
/*
   Allocate and initialize new qdisc.

   Parameters are passed via the netlink attribute array tca.
 */
875
/* Allocate and initialize a new qdisc attached to @dev_queue.
 *
 * @dev:       device the qdisc is created for (a reference is consumed on
 *             the error path via dev_put() — callers hold one)
 * @dev_queue: tx/ingress queue the qdisc is bound to
 * @p:         parent qdisc, or NULL when attaching at the root
 * @parent:    parent classid recorded in sch->parent
 * @handle:    requested handle; TC_H_INGRESS selects ingress setup,
 *             0 requests automatic handle allocation
 * @tca:       netlink attributes (TCA_KIND, TCA_OPTIONS, TCA_STAB, TCA_RATE)
 * @errp:      out-parameter for the error code when NULL is returned
 *
 * Returns the new qdisc, or NULL with *errp set.  Called under RTNL;
 * note that the modprobe path below temporarily drops RTNL.
 */
static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	/* qdisc_alloc() takes a module reference on ops->owner. */
	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			/* Caller did not pick a handle; allocate one. */
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		/* Per-cpu stats are only allocated when the qdisc type
		 * opted in (TCQ_F_CPUSTATS set by ops->init/alloc).
		 */
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				alloc_percpu(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			/* Published under RCU; readers use qdisc_dump_stab. */
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			/* Pick the lock the estimator timer must take:
			 * the root sleeping lock for ordinary children,
			 * the qdisc's own lock for root/ingress/mq cases.
			 */
			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/* free_percpu(NULL) is a no-op, so this is safe even when the
	 * per-cpu counters were never allocated.
	 */
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
1014
/* Apply a RTM_NEWQDISC change request to an existing qdisc.
 *
 * Forwards TCA_OPTIONS to the qdisc's ->change() hook, swaps in a new
 * size table (TCA_STAB) under RCU, and replaces the rate estimator
 * (TCA_RATE).  Returns 0 on success or a negative errno.  Called under
 * RTNL.
 */
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		/* A qdisc without ->change() cannot be reconfigured. */
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	/* Swap the size table under RCU; absence of TCA_STAB clears it
	 * (stab stays NULL).  The old table is released afterwards.
	 */
	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
1052
/* State threaded through the class walker by check_loop().
 * The embedded walker MUST stay the first member: check_loop_fn()
 * casts the received struct qdisc_walker * back to check_loop_arg.
 */
struct check_loop_arg {
	struct qdisc_walker	w;	/* generic walker; w.stop signals a loop */
	struct Qdisc *p;		/* qdisc we are trying to graft */
	int depth;			/* current recursion depth */
};
1058
1059static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1060
1061static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1062{
1063 struct check_loop_arg arg;
1064
1065 if (q->ops->cl_ops == NULL)
1066 return 0;
1067
1068 arg.w.stop = arg.w.skip = arg.w.count = 0;
1069 arg.w.fn = check_loop_fn;
1070 arg.depth = depth;
1071 arg.p = p;
1072 q->ops->cl_ops->walk(q, &arg.w);
1073 return arg.w.stop ? -ELOOP : 0;
1074}
1075
1076static int
1077check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1078{
1079 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -08001080 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1082
1083 leaf = cops->leaf(q, cl);
1084 if (leaf) {
1085 if (leaf == arg->p || arg->depth > 7)
1086 return -ELOOP;
1087 return check_loop(leaf, arg->p, arg->depth + 1);
1088 }
1089 return 0;
1090}
1091
1092/*
1093 * Delete/get qdisc.
1094 */
1095
/* RTM_DELQDISC / RTM_GETQDISC netlink handler: locate a qdisc by
 * parent classid and/or handle, then either delete it (grafting NULL
 * in its place) or just send a notification describing it.
 * Returns 0 or a negative errno.  Runs under RTNL.
 */
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	/* Deletion requires CAP_NET_ADMIN; read-only GET does not. */
	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		/* Addressed by parent: resolve the parent's leaf qdisc. */
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		/* If a handle was also given, it must match. */
		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		/* No parent given: address directly by handle. */
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		/* handle == 0 means a builtin/default qdisc — not deletable. */
		if (q->handle == 0)
			return -ENOENT;
		/* Grafting NULL removes q; qdisc_graft also notifies. */
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
1160
1161/*
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001162 * Create/change qdisc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 */
1164
/* RTM_NEWQDISC netlink handler: create, replace, or change a qdisc.
 *
 * Depending on the NLM_F_* flags and whether a matching qdisc already
 * exists, this either modifies it in place (qdisc_change), or creates
 * a new one (qdisc_create) and grafts it over the old (qdisc_graft).
 * Returns 0 or a negative errno.  Runs under RTNL; may replay itself
 * after a module load (see the -EAGAIN path in qdisc_create).
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				/* Moving an existing qdisc must not create
				 * a cycle in the hierarchy.
				 */
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				/* Hold a reference for the graft below. */
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know, that some child q is already
				 * attached to this parent and have choice:
				 * either to change it or to create/graft new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, requestor wanted to say,
				 * that qdisc tcm_handle is not expected
				 * to exist, so that we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of hole in API, we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft, if
				 * user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		/* Let a classful parent choose the tx queue if it can. */
		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		/* -EAGAIN: qdisc_create dropped RTNL to load a module;
		 * the whole request must be re-validated from scratch.
		 */
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
1318
/* Serialize one qdisc into a netlink message on @skb.
 *
 * Emits the tcmsg header, TCA_KIND, the qdisc's own dump, its size
 * table, and the statistics blocks.  On success returns skb->len;
 * on any failure the partial message is trimmed away and -1 returned
 * (the caller treats <= 0 as "stop dumping").
 */
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	/* Dump loops can be long; give other tasks a chance to run. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	/* Snapshot the queue length for the stats block below. */
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	/* Use per-cpu counters when the qdisc maintains them. */
	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	/* Remove everything appended since the rollback point. */
	nlmsg_trim(skb, b);
	return -1;
}
1381
Eric Dumazet53b0f082010-05-22 20:37:44 +00001382static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1383{
1384 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1385}
1386
Tom Goff7316ae82010-03-19 15:40:13 +00001387static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1388 struct nlmsghdr *n, u32 clid,
1389 struct Qdisc *old, struct Qdisc *new)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390{
1391 struct sk_buff *skb;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001392 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
1394 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1395 if (!skb)
1396 return -ENOBUFS;
1397
Eric Dumazet53b0f082010-05-22 20:37:44 +00001398 if (old && !tc_qdisc_dump_ignore(old)) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001399 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001400 0, RTM_DELQDISC) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 goto err_out;
1402 }
Eric Dumazet53b0f082010-05-22 20:37:44 +00001403 if (new && !tc_qdisc_dump_ignore(new)) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001404 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001405 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 goto err_out;
1407 }
1408
1409 if (skb->len)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001410 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001411 n->nlmsg_flags & NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412
1413err_out:
1414 kfree_skb(skb);
1415 return -EINVAL;
1416}
1417
/* Dump @root and every qdisc on its ->list into @skb.
 *
 * @q_idx_p: in/out cursor counting qdiscs visited on this device;
 *           entries below @s_q_idx were already sent in a previous
 *           dump round and are skipped.
 * Returns 0 when done (or @root is NULL), -1 when @skb filled up and
 * the dump must resume later from the updated *q_idx_p.
 */
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	/* The root itself is entry 0; its siblings follow on root->list. */
	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	/* skb full: report -1 but still publish the resume cursor. */
	ret = -1;
	goto out;
}
1457
/* RTM_GETQDISC dump handler: walk all devices in the namespace and
 * dump each device's egress tree and ingress qdisc.
 *
 * cb->args[0]/[1] hold the (device index, qdisc index) cursor so an
 * interrupted dump resumes where it stopped.  Returns skb->len.
 */
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		/* Past the resume device: restart qdisc index at 0. */
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	/* Save the cursor for the next invocation of the dump. */
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1498
1499
1500
1501/************************************************
1502 * Traffic classes manipulation. *
1503 ************************************************/
1504
1505
1506
Thomas Graf661d2962013-03-21 07:45:29 +00001507static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001509 struct net *net = sock_net(skb->sk);
David S. Miller02ef22c2012-06-26 21:50:05 -07001510 struct tcmsg *tcm = nlmsg_data(n);
Patrick McHardy1e904742008-01-22 22:11:17 -08001511 struct nlattr *tca[TCA_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 struct net_device *dev;
1513 struct Qdisc *q = NULL;
Eric Dumazet20fea082007-11-14 01:44:41 -08001514 const struct Qdisc_class_ops *cops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 unsigned long cl = 0;
1516 unsigned long new_cl;
Hong zhi guode179c82013-03-25 17:36:33 +00001517 u32 portid;
1518 u32 clid;
1519 u32 qid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 int err;
1521
Stéphane Graber4e8bbb82014-04-30 11:25:43 -04001522 if ((n->nlmsg_type != RTM_GETTCLASS) &&
David S. Miller5f013c9b2014-05-12 13:19:14 -04001523 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
Eric W. Biedermandfc47ef2012-11-16 03:03:00 +00001524 return -EPERM;
1525
Patrick McHardy1e904742008-01-22 22:11:17 -08001526 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1527 if (err < 0)
1528 return err;
1529
Hong zhi guode179c82013-03-25 17:36:33 +00001530 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1531 if (!dev)
1532 return -ENODEV;
1533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 /*
1535 parent == TC_H_UNSPEC - unspecified parent.
1536 parent == TC_H_ROOT - class is root, which has no parent.
1537 parent == X:0 - parent is root class.
1538 parent == X:Y - parent is a node in hierarchy.
1539 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1540
1541 handle == 0:0 - generate handle from kernel pool.
1542 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1543 handle == X:Y - clear.
1544 handle == X:0 - root class.
1545 */
1546
1547 /* Step 1. Determine qdisc handle X:0 */
1548
Hong zhi guode179c82013-03-25 17:36:33 +00001549 portid = tcm->tcm_parent;
1550 clid = tcm->tcm_handle;
1551 qid = TC_H_MAJ(clid);
1552
Eric W. Biederman15e47302012-09-07 20:12:54 +00001553 if (portid != TC_H_ROOT) {
1554 u32 qid1 = TC_H_MAJ(portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556 if (qid && qid1) {
1557 /* If both majors are known, they must be identical. */
1558 if (qid != qid1)
1559 return -EINVAL;
1560 } else if (qid1) {
1561 qid = qid1;
1562 } else if (qid == 0)
Patrick McHardyaf356af2009-09-04 06:41:18 +00001563 qid = dev->qdisc->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
1565 /* Now qid is genuine qdisc handle consistent
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001566 * both with parent and child.
1567 *
Eric W. Biederman15e47302012-09-07 20:12:54 +00001568 * TC_H_MAJ(portid) still may be unspecified, complete it now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00001570 if (portid)
1571 portid = TC_H_MAKE(qid, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 } else {
1573 if (qid == 0)
Patrick McHardyaf356af2009-09-04 06:41:18 +00001574 qid = dev->qdisc->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 }
1576
1577 /* OK. Locate qdisc */
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001578 q = qdisc_lookup(dev, qid);
1579 if (!q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 return -ENOENT;
1581
	/* And check that it supports classes */
1583 cops = q->ops->cl_ops;
1584 if (cops == NULL)
1585 return -EINVAL;
1586
1587 /* Now try to get class */
1588 if (clid == 0) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001589 if (portid == TC_H_ROOT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 clid = qid;
1591 } else
1592 clid = TC_H_MAKE(qid, clid);
1593
1594 if (clid)
1595 cl = cops->get(q, clid);
1596
1597 if (cl == 0) {
1598 err = -ENOENT;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001599 if (n->nlmsg_type != RTM_NEWTCLASS ||
1600 !(n->nlmsg_flags & NLM_F_CREATE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 goto out;
1602 } else {
1603 switch (n->nlmsg_type) {
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +09001604 case RTM_NEWTCLASS:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 err = -EEXIST;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001606 if (n->nlmsg_flags & NLM_F_EXCL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 goto out;
1608 break;
1609 case RTM_DELTCLASS:
Patrick McHardyde6d5cd2009-09-04 06:41:16 +00001610 err = -EOPNOTSUPP;
1611 if (cops->delete)
1612 err = cops->delete(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 if (err == 0)
Tom Goff7316ae82010-03-19 15:40:13 +00001614 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 goto out;
1616 case RTM_GETTCLASS:
Tom Goff7316ae82010-03-19 15:40:13 +00001617 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 goto out;
1619 default:
1620 err = -EINVAL;
1621 goto out;
1622 }
1623 }
1624
1625 new_cl = cl;
Patrick McHardyde6d5cd2009-09-04 06:41:16 +00001626 err = -EOPNOTSUPP;
1627 if (cops->change)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001628 err = cops->change(q, clid, portid, tca, &new_cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 if (err == 0)
Tom Goff7316ae82010-03-19 15:40:13 +00001630 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
1632out:
1633 if (cl)
1634 cops->put(q, cl);
1635
1636 return err;
1637}
1638
1639
/*
 * tc_fill_tclass - append one class description message to a netlink skb
 * @skb: destination netlink buffer
 * @q: qdisc owning the class
 * @cl: opaque class handle as returned by the qdisc's class ops
 * @portid, @seq, @flags, @event: netlink header fields for the message
 *
 * Returns skb->len on success, or -1 on failure; on failure everything
 * written by this call is trimmed off so @skb remains usable.
 */
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point on error */
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	/* May be called for many classes in one dump pass; yield the CPU. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;	/* clear padding: no kernel memory leaks to userspace */
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	/* Class-specific attributes; the callback may update @tcm fields. */
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	/* Undo the partial message so the caller's skb stays consistent. */
	nlmsg_trim(skb, b);
	return -1;
}
1685
Tom Goff7316ae82010-03-19 15:40:13 +00001686static int tclass_notify(struct net *net, struct sk_buff *oskb,
1687 struct nlmsghdr *n, struct Qdisc *q,
1688 unsigned long cl, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689{
1690 struct sk_buff *skb;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001691 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
1693 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1694 if (!skb)
1695 return -ENOBUFS;
1696
Eric W. Biederman15e47302012-09-07 20:12:54 +00001697 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 kfree_skb(skb);
1699 return -EINVAL;
1700 }
1701
Eric W. Biederman15e47302012-09-07 20:12:54 +00001702 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001703 n->nlmsg_flags & NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704}
1705
/* State passed to a qdisc's class walker during a netlink dump. */
struct qdisc_dump_args {
	struct qdisc_walker w;		/* must stay first: cast back in qdisc_class_dump() */
	struct sk_buff *skb;		/* netlink buffer being filled */
	struct netlink_callback *cb;	/* dump context (portid, seq, args[]) */
};
1711
1712static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1713{
1714 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1715
Eric W. Biederman15e47302012-09-07 20:12:54 +00001716 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1718}
1719
David S. Miller30723672008-07-18 22:50:15 -07001720static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1721 struct tcmsg *tcm, struct netlink_callback *cb,
1722 int *t_p, int s_t)
1723{
1724 struct qdisc_dump_args arg;
1725
1726 if (tc_qdisc_dump_ignore(q) ||
1727 *t_p < s_t || !q->ops->cl_ops ||
1728 (tcm->tcm_parent &&
1729 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1730 (*t_p)++;
1731 return 0;
1732 }
1733 if (*t_p > s_t)
1734 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1735 arg.w.fn = qdisc_class_dump;
1736 arg.skb = skb;
1737 arg.cb = cb;
1738 arg.w.stop = 0;
1739 arg.w.skip = cb->args[1];
1740 arg.w.count = 0;
1741 q->ops->cl_ops->walk(q, &arg.w);
1742 cb->args[1] = arg.w.count;
1743 if (arg.w.stop)
1744 return -1;
1745 (*t_p)++;
1746 return 0;
1747}
1748
1749static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1750 struct tcmsg *tcm, struct netlink_callback *cb,
1751 int *t_p, int s_t)
1752{
1753 struct Qdisc *q;
1754
1755 if (!root)
1756 return 0;
1757
1758 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1759 return -1;
1760
1761 list_for_each_entry(q, &root->list, list) {
1762 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1763 return -1;
1764 }
1765
1766 return 0;
1767}
1768
/*
 * tc_dump_tclass - netlink dump handler for RTM_GETTCLASS
 *
 * Walks the classes of every qdisc on the requested device: the root
 * hierarchy first, then the ingress qdisc if one exists.  cb->args[0]
 * carries the qdisc index to resume from across successive dump calls;
 * cb->args[1] (managed by tc_dump_tclass_qdisc) carries the class
 * position within the current qdisc.
 */
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	/* Malformed request: payload too short to hold a tcmsg. */
	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);	/* takes a device ref */
	if (!dev)
		return 0;

	s_t = cb->args[0];	/* qdisc index where the previous call stopped */
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;	/* remember position for the next dump call */

	dev_put(dev);		/* balance dev_get_by_index() */
	return skb->len;
}
1801
1802/* Main classifier routine: scans classifier chain attached
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001803 * to this qdisc, (optionally) tests for protocol and asks
1804 * specific classifiers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 */
Eric Dumazetdc7f9f62011-07-05 23:25:42 +00001806int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
Patrick McHardy73ca4912007-07-15 00:02:31 -07001807 struct tcf_result *res)
1808{
1809 __be16 protocol = skb->protocol;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001810 int err;
Patrick McHardy73ca4912007-07-15 00:02:31 -07001811
John Fastabend25d8c0d2014-09-12 20:05:27 -07001812 for (; tp; tp = rcu_dereference_bh(tp->next)) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001813 if (tp->protocol != protocol &&
1814 tp->protocol != htons(ETH_P_ALL))
1815 continue;
1816 err = tp->classify(skb, tp, res);
1817
1818 if (err >= 0) {
Patrick McHardy73ca4912007-07-15 00:02:31 -07001819#ifdef CONFIG_NET_CLS_ACT
1820 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1821 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1822#endif
1823 return err;
1824 }
1825 }
1826 return -1;
1827}
1828EXPORT_SYMBOL(tc_classify_compat);
1829
Eric Dumazetdc7f9f62011-07-05 23:25:42 +00001830int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
Patrick McHardy73ca4912007-07-15 00:02:31 -07001831 struct tcf_result *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832{
1833 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetdc7f9f62011-07-05 23:25:42 +00001835 const struct tcf_proto *otp = tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836reclassify:
Hagen Paul Pfeifer52bc9742011-02-25 05:45:21 +00001837#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
Patrick McHardy73ca4912007-07-15 00:02:31 -07001839 err = tc_classify_compat(skb, tp, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840#ifdef CONFIG_NET_CLS_ACT
Patrick McHardy73ca4912007-07-15 00:02:31 -07001841 if (err == TC_ACT_RECLASSIFY) {
1842 u32 verd = G_TC_VERD(skb->tc_verd);
1843 tp = otp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Patrick McHardy73ca4912007-07-15 00:02:31 -07001845 if (verd++ >= MAX_REC_LOOP) {
Joe Perchese87cc472012-05-13 21:56:26 +00001846 net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
1847 tp->q->ops->id,
1848 tp->prio & 0xffff,
1849 ntohs(tp->protocol));
Patrick McHardy73ca4912007-07-15 00:02:31 -07001850 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 }
Patrick McHardy73ca4912007-07-15 00:02:31 -07001852 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1853 goto reclassify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 }
Patrick McHardy73ca4912007-07-15 00:02:31 -07001855#endif
1856 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857}
Patrick McHardy73ca4912007-07-15 00:02:31 -07001858EXPORT_SYMBOL(tc_classify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
/*
 * tcf_destroy - free a single classifier
 *
 * Invokes the classifier's destroy hook, releases the owning module
 * reference, and frees the tcf_proto only after an RCU grace period,
 * since readers may still be traversing the chain.
 */
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}
1866
John Fastabend25d8c0d2014-09-12 20:05:27 -07001867void tcf_destroy_chain(struct tcf_proto __rcu **fl)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001868{
1869 struct tcf_proto *tp;
1870
John Fastabend25d8c0d2014-09-12 20:05:27 -07001871 while ((tp = rtnl_dereference(*fl)) != NULL) {
1872 RCU_INIT_POINTER(*fl, tp->next);
Patrick McHardya48b5a62007-03-23 11:29:43 -07001873 tcf_destroy(tp);
1874 }
1875}
1876EXPORT_SYMBOL(tcf_destroy_chain);
1877
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878#ifdef CONFIG_PROC_FS
/* Render /proc/net/psched: clock parameters consumed by userspace tc.
 * NOTE(review): field meanings are fixed ABI — keep the format stable;
 * the literal 1000000 appears to be a legacy placeholder — confirm
 * against iproute2 before touching. */
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}
1891
/* Open handler for /proc/net/psched: single_open() suffices since the
 * whole report fits in one seq_file pass. */
static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}
1896
/* File operations for the read-only /proc/net/psched seq file. */
static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
Tom Goff7316ae82010-03-19 15:40:13 +00001904
1905static int __net_init psched_net_init(struct net *net)
1906{
1907 struct proc_dir_entry *e;
1908
Gao fengd4beaa62013-02-18 01:34:54 +00001909 e = proc_create("psched", 0, net->proc_net, &psched_fops);
Tom Goff7316ae82010-03-19 15:40:13 +00001910 if (e == NULL)
1911 return -ENOMEM;
1912
1913 return 0;
1914}
1915
/* Per-netns teardown: remove this namespace's /proc/net/psched entry. */
static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
1920#else
/* !CONFIG_PROC_FS stub: nothing to create per namespace. */
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}
1925
/* !CONFIG_PROC_FS stub: nothing to tear down. */
static void __net_exit psched_net_exit(struct net *net)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929#endif
1930
/* Create/remove /proc/net/psched as network namespaces come and go. */
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
1935
/*
 * pktsched_init - packet scheduler subsystem bootstrap
 *
 * Registers the per-netns /proc hooks, the built-in qdisc disciplines,
 * and the rtnetlink handlers for qdisc and traffic-class operations.
 */
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	/* Built-in disciplines.  NOTE(review): register_qdisc() return
	 * values are ignored here — presumably safe for unique built-in
	 * ids, but confirm against register_qdisc()'s failure modes. */
	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	/* Netlink entry points: qdisc new/del/get+dump, class new/del/get+dump. */
	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}
1962
1963subsys_initcall(pktsched_init);