blob: a2a7a81b2b0b01d956f4e48036fbc601eed5ad23 [file] [log] [blame]
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
Patrick McHardy41794772007-03-16 01:19:15 -070029#include <linux/hrtimer.h>
Jarek Poplawski25bfcd52008-08-18 20:53:34 -070030#include <linux/lockdep.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020033#include <net/net_namespace.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110034#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070035#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <net/pkt_sched.h>
37
Tom Goff7316ae82010-03-19 15:40:13 +000038static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 struct Qdisc *old, struct Qdisc *new);
Tom Goff7316ae82010-03-19 15:40:13 +000041static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 device is ready to send something) in order and at times
58 determined by algorithm hidden in it.
59
60 qdisc's are divided to two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets to "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as rule, queues)
66 attached to them etc. etc. etc.
67
68 The goal of the routines in this file is to translate
69 information supplied by user in the form of handles
70 to more intelligible for kernel form, to make some sanity
71 checks and part of work, which is common to all qdiscs
72 and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but it does not mean that queue is empty, it just means that
84 discipline does not want to send anything this time.
85 Queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues q->q is not
87 real packet queue, but however q->q.qlen must be valid.
88
89 ---enqueue
90
91 enqueue returns 0, if packet was enqueued successfully.
92 If packet (this one or another one) was dropped, it returns
93 not zero error code.
94 NET_XMIT_DROP - this packet dropped
95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100
101 Auxiliary routines:
102
   ---peek

   like dequeue but without removing a packet from the queue

   ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
Zhi Yong Wu21eb2182014-01-01 04:34:51 +0800138/* Register/unregister queueing discipline */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
/**
 * register_qdisc - add a qdisc type to the global list
 * @qops: ops vector of the discipline being registered
 *
 * Fills in default enqueue/dequeue/peek handlers where the caller left
 * them NULL and sanity-checks the class-ops vector.  Returns 0 on
 * success, -EEXIST if an ops with the same id is already registered,
 * -EINVAL for an inconsistent ops vector.
 */
int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	/* Reject duplicate ids; on fall-through qp points at the tail link. */
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		/* A custom dequeue without a matching peek is inconsistent;
		 * only fall back to the noop peek when dequeue is noop too.
		 */
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		/* Classful qdiscs must provide the basic class accessors. */
		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		/* Supplying a filter chain implies bind/unbind support. */
		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	/* Link in at the tail position found by the duplicate scan. */
	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800201EXPORT_SYMBOL(unregister_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
/* Get default qdisc if not otherwise specified */
/* Copy the id of the current default qdisc ops into @name (at most @len
 * bytes, always NUL-terminated via strlcpy), under the module read lock.
 */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
/* Set new default qdisc to use */
/* Requires CAP_NET_ADMIN.  Returns 0 on success, -EPERM without the
 * capability, -ENOENT when no ops with that id can be found or loaded.
 */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		/* request_module() may sleep, so it cannot run under the
		 * rwlock; re-lookup after reacquiring it.
		 */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		/* Drop the module ref held for the previous default; the ref
		 * taken by qdisc_lookup_default() now pins the new one.
		 */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255/* We know handle. Find qdisc among all qdisc's attached to device
256 (root qdisc, all its children, children of children etc.)
257 */
258
Hannes Eder6113b742008-11-28 03:06:46 -0800259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
David S. Miller8123b422008-08-08 23:23:39 -0700260{
261 struct Qdisc *q;
262
263 if (!(root->flags & TCQ_F_BUILTIN) &&
264 root->handle == handle)
265 return root;
266
267 list_for_each_entry(q, &root->list, list) {
268 if (q->handle == handle)
269 return q;
270 }
271 return NULL;
272}
273
/* Link @q onto its device root qdisc's list so qdisc_lookup() can find
 * it.  Root and ingress qdiscs are not listed: the root anchors the list
 * itself and ingress lives on a separate queue.
 */
void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		/* Adding under the noop root would lose the entry. */
		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700284
285void qdisc_list_del(struct Qdisc *q)
286{
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800287 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700288 list_del(&q->list);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700289}
290EXPORT_SYMBOL(qdisc_list_del);
291
/* Find the qdisc with @handle on @dev: first search the egress tree
 * rooted at dev->qdisc, then fall back to the ingress queue's sleeping
 * qdisc (if the device has one).  Returns NULL when not found.
 */
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}
307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
309{
310 unsigned long cl;
311 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800312 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
314 if (cops == NULL)
315 return NULL;
316 cl = cops->get(p, classid);
317
318 if (cl == 0)
319 return NULL;
320 leaf = cops->leaf(p, cl);
321 cops->put(p, cl);
322 return leaf;
323}
324
325/* Find queueing discipline by name */
326
Patrick McHardy1e904742008-01-22 22:11:17 -0800327static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328{
329 struct Qdisc_ops *q = NULL;
330
331 if (kind) {
332 read_lock(&qdisc_mod_lock);
333 for (q = qdisc_base; q; q = q->next) {
Patrick McHardy1e904742008-01-22 22:11:17 -0800334 if (nla_strcmp(kind, q->id) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (!try_module_get(q->owner))
336 q = NULL;
337 break;
338 }
339 }
340 read_unlock(&qdisc_mod_lock);
341 }
342 return q;
343}
344
/* The linklayer setting were not transferred from iproute2, in older
 * versions, and the rate tables lookup systems have been dropped in
 * the kernel. To keep backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by detecting if the rate
 * table were modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find low and high table entries for
 * mapping this cell. If these entries contain the same value, when
 * the rate tables have been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
 * and then roundup to the next cell, calc the table entry one below,
 * and compare.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	/* 48 is the ATM cell payload size the old tables were aligned to. */
	int low = roundup(r->mpu, 48);
	int high = roundup(low+1, 48);
	int cell_low = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	/* Equal entries across one cell boundary => ATM-aligned table.
	 * cell_high < 256 keeps the index inside the 256-entry rtab.
	 */
	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static struct qdisc_rate_table *qdisc_rtab_list;
385
/* Look up or create a shared rate table for @r with table data @tab.
 * Identical (rate spec, data) pairs share one refcounted entry on the
 * global qdisc_rtab_list.  Returns NULL on invalid input or allocation
 * failure.  May set r->linklayer as a side effect (ATM detection).
 */
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	/* Reuse an existing table when both spec and data match. */
	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		/* 1024 bytes == 256 u32 entries == TC_RTAB_SIZE. */
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415
416void qdisc_put_rtab(struct qdisc_rate_table *tab)
417{
418 struct qdisc_rate_table *rtab, **rtabp;
419
420 if (!tab || --tab->refcnt)
421 return;
422
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000423 for (rtabp = &qdisc_rtab_list;
424 (rtab = *rtabp) != NULL;
425 rtabp = &rtab->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (rtab == tab) {
427 *rtabp = rtab->next;
428 kfree(rtab);
429 return;
430 }
431 }
432}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800433EXPORT_SYMBOL(qdisc_put_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700435static LIST_HEAD(qdisc_stab_list);
436static DEFINE_SPINLOCK(qdisc_stab_lock);
437
438static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
439 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
440 [TCA_STAB_DATA] = { .type = NLA_BINARY },
441};
442
/* Parse a TCA_STAB nested attribute and return a refcounted size table.
 * Identical tables are shared via qdisc_stab_list.  Returns an ERR_PTR
 * on parse error, inconsistent sizes, or allocation failure.
 */
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	/* The declared table size must match the attribute payload. */
	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	/* Share an existing table with identical spec and data. */
	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	/* Drop the spinlock across the sleeping GFP_KERNEL allocation. */
	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
499
Eric Dumazeta2da5702011-01-20 03:48:19 +0000500static void stab_kfree_rcu(struct rcu_head *head)
501{
502 kfree(container_of(head, struct qdisc_size_table, rcu));
503}
504
/* Drop one reference on size table @tab; on the last reference the
 * table is unlinked and freed after an RCU-bh grace period (readers may
 * be using it from the datapath).  NULL is ignored.
 */
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
520
/* Dump @stab's size spec into @skb as a nested TCA_STAB attribute.
 * Returns the new skb length on success, -1 when the message does not
 * fit (netlink dump convention).  Note only szopts is dumped, not the
 * table data.
 */
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
537
/* Compute the accounted packet length for @skb from size table @stab
 * and store it in qdisc_skb_cb(skb)->pkt_len.  The raw length plus
 * overhead indexes the table (after cell alignment/shift); lengths past
 * the table are extrapolated from the last entry.  Result is clamped to
 * at least 1.
 */
void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		/* Beyond the table: last entry scaled by the number of
		 * whole table spans, plus the wrapped remainder entry.
		 */
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700565
Florian Westphal6e765a02014-06-11 20:35:18 +0200566void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
570 txt, qdisc->ops->id, qdisc->handle >> 16);
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800571 qdisc->flags |= TCQ_F_WARN_NONWC;
572 }
573}
574EXPORT_SYMBOL(qdisc_warn_nonwc);
575
/* hrtimer callback: the watchdog fired, so clear the qdisc's throttled
 * state and reschedule its root qdisc for transmission.  One-shot timer
 * (HRTIMER_NORESTART); rearming is done via qdisc_watchdog_schedule_ns().
 */
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}
588
589void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
590{
Eric Dumazet4a8e3202014-09-20 18:01:30 -0700591 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
David S. Miller2fbd3da2009-09-01 17:59:25 -0700592 wd->timer.function = qdisc_watchdog;
Patrick McHardy41794772007-03-16 01:19:15 -0700593 wd->qdisc = qdisc;
594}
595EXPORT_SYMBOL(qdisc_watchdog_init);
596
/* Arm watchdog @wd to fire at absolute time @expires (nanoseconds),
 * optionally marking the qdisc throttled.  A no-op when the root qdisc
 * has been deactivated, so a dying device does not get rescheduled.
 */
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (throttle)
		qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
Patrick McHardy41794772007-03-16 01:19:15 -0700611
/* Cancel a pending watchdog timer (waiting for a running callback to
 * finish) and clear the qdisc's throttled state.
 */
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618
/* Allocate and initialize an array of @n hlist heads for a class hash.
 * Small tables come from kmalloc; larger ones fall back to whole pages.
 * qdisc_class_hash_free() mirrors this split.  Returns NULL on failure.
 */
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}
636
637static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
638{
639 unsigned int size = n * sizeof(struct hlist_head);
640
641 if (size <= PAGE_SIZE)
642 kfree(h);
643 else
644 free_pages((unsigned long)h, get_order(size));
645}
646
/* Double the class hash table of @sch when its load factor exceeds
 * 0.75, rehashing all entries.  The allocation happens outside the
 * qdisc tree lock; only the pointer swap and rehash run under it.
 * Silently keeps the old table if allocation fails.
 */
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		/* _safe variant: nodes move to nhash during iteration. */
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
682
683int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
684{
685 unsigned int size = 4;
686
687 clhash->hash = qdisc_class_hash_alloc(size);
688 if (clhash->hash == NULL)
689 return -ENOMEM;
690 clhash->hashsize = size;
691 clhash->hashmask = size - 1;
692 clhash->hashelems = 0;
693 return 0;
694}
695EXPORT_SYMBOL(qdisc_class_hash_init);
696
/* Release the bucket array of @clhash; entries themselves are owned by
 * their classes and must already be gone.
 */
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
702
703void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
704 struct Qdisc_class_common *cl)
705{
706 unsigned int h;
707
708 INIT_HLIST_NODE(&cl->hnode);
709 h = qdisc_class_hash(cl->classid, clhash->hashmask);
710 hlist_add_head(&cl->hnode, &clhash->hash[h]);
711 clhash->hashelems++;
712}
713EXPORT_SYMBOL(qdisc_class_hash_insert);
714
/* Remove class @cl from @clhash and decrement the element count. */
void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
722
/* Allocate an unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 *
 * Uses a static cursor shared across devices, probing up to 0x8000
 * candidates (with wraparound past TC_H_ROOT) for one unused on @dev.
 * Returns 0 when the whole space is exhausted.
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		/* May scan many handles; yield the CPU between probes. */
		cond_resched();
	} while	(--i > 0);

	return 0;
}
742
/* Propagate a decrease of @n packets / @len bytes up @sch's ancestor
 * chain, fixing qlen/backlog of every parent and letting classful
 * parents react via qlen_notify (e.g. deactivate an empty class).
 * Stops at the root or at an ingress parent.
 */
void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		/* Ingress qdiscs keep no meaningful parent accounting. */
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			/* Only the root may legitimately have no parent. */
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
Tom Goff7316ae82010-03-19 15:40:13 +0000776static void notify_and_destroy(struct net *net, struct sk_buff *skb,
777 struct nlmsghdr *n, u32 clid,
David S. Miller99194cf2008-07-17 04:54:10 -0700778 struct Qdisc *old, struct Qdisc *new)
779{
780 if (new || old)
Tom Goff7316ae82010-03-19 15:40:13 +0000781 qdisc_notify(net, skb, n, clid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782
David S. Miller4d8863a2008-08-18 21:03:15 -0700783 if (old)
David S. Miller99194cf2008-07-17 04:54:10 -0700784 qdisc_destroy(old);
David S. Miller99194cf2008-07-17 04:54:10 -0700785}
786
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		/* Grafting at device root (egress or ingress). */
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			/* Ingress uses exactly one dedicated queue. */
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		/* Quiesce the device while swapping root qdiscs. */
		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		/* Qdiscs with ->attach (e.g. mq) wire up their own
		 * per-queue children; skip the per-queue grafting loop.
		 */
		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			/* One reference per additional tx queue sharing new. */
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			/* A NULL new reverts the device to the noop qdisc. */
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		/* Grafting into a class of a classful parent qdisc. */
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
870
/* lockdep annotation is needed for ingress; egress gets it only for name.
 * Separate classes keep lockdep from conflating rx and tx qdisc locks.
 */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
874
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.

   On failure, returns NULL and stores a negative errno in *errp.
   -EAGAIN means a scheduler module was loaded and the caller must
   replay the request (RTNL was dropped during the module load).
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		/* Canonical ingress handle: ffff:0000. */
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			/* Pick an unused major handle for the new qdisc. */
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		/* Per-cpu stats, optional size table and optional rate
		 * estimator are all set up after a successful ->init.
		 */
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			/* Use the root sleeping lock for ordinary qdiscs;
			 * ingress and mq children use their own lock.
			 */
			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	/* NOTE(review): the device reference released here is presumably
	 * the one taken by qdisc_alloc() — confirm against sch_generic.c.
	 */
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
1019
/* Apply a change request (RTM_NEWQDISC without create/graft) to an
 * existing qdisc: forward TCA_OPTIONS to the scheduler's ->change,
 * swap in a new size table, and replace the rate estimator.
 * Returns 0 on success or a negative errno from ->change/stab lookup.
 */
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		/* Schedulers without ->change cannot be reconfigured. */
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	/* Publish the new stab (or NULL) before dropping the old one;
	 * readers access sch->stab under RCU.
	 */
	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
1057
/* Walker state for loop detection when grafting a qdisc: "p" is the
 * candidate parent we must not reach, "depth" the current recursion depth.
 */
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1065
1066static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1067{
1068 struct check_loop_arg arg;
1069
1070 if (q->ops->cl_ops == NULL)
1071 return 0;
1072
1073 arg.w.stop = arg.w.skip = arg.w.count = 0;
1074 arg.w.fn = check_loop_fn;
1075 arg.depth = depth;
1076 arg.p = p;
1077 q->ops->cl_ops->walk(q, &arg.w);
1078 return arg.w.stop ? -ELOOP : 0;
1079}
1080
/* Per-class callback for check_loop(): follow the class's leaf qdisc
 * and recurse.  Returns -ELOOP if the forbidden ancestor is reached or
 * the hierarchy is deeper than 7 levels (hard recursion cap).
 */
static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
1096
/*
 * Delete/get qdisc.
 *
 * Netlink handler for RTM_DELQDISC and RTM_GETQDISC: locate the qdisc
 * named by tcm_parent/tcm_handle, then either graft it away (delete)
 * or send a notification describing it (get).
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	/* Deletion requires CAP_NET_ADMIN; plain get does not. */
	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		/* Resolve the qdisc through its parent reference. */
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		/* If a handle was also given, it must match. */
		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		/* No parent given: look up directly by handle. */
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		/* Built-in/default qdiscs (handle 0) cannot be deleted. */
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
1165
/*
 * Create/change qdisc.
 *
 * Netlink handler for RTM_NEWQDISC.  Depending on the NLM_F_* flags and
 * whether a matching qdisc already exists, this either changes the
 * existing qdisc in place or creates a new one and grafts it into the
 * hierarchy.  -EAGAIN from qdisc_create (module autoload) replays the
 * whole request.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		/* Resolve the existing child qdisc (if any) of "clid". */
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				/* Moving an existing qdisc must not create
				 * a cycle in the hierarchy.
				 */
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know, that some child q is already
				 * attached to this parent and have choice:
				 * either to change it or to create/graft new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, requestor wanted to say,
				 * that qdisc tcm_handle is not expected
				 * to exist, so that we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of hole in API, we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft, if
				 * user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		/* Let a classful parent pick the tx queue if it can. */
		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		/* -EAGAIN: a scheduler module was autoloaded; replay. */
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
1323
/* Fill one RTM_NEWQDISC/RTM_DELQDISC message describing qdisc "q" into
 * "skb": tcmsg header, kind, scheduler options, size table and the
 * basic/rate/queue statistics.  Returns skb->len on success, -1 if the
 * skb ran out of room (the partial message is trimmed off).
 */
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	/* Dump loops can be long; give the scheduler a chance. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	/* tcm_info carries the current reference count to userspace. */
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	/* Prefer per-cpu counters when the scheduler keeps them. */
	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	/* Drop the partially built message from the skb. */
	nlmsg_trim(skb, b);
	return -1;
}
1386
Eric Dumazet53b0f082010-05-22 20:37:44 +00001387static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1388{
1389 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1390}
1391
/* Send an RTNLGRP_TC notification describing a qdisc replacement:
 * a DELQDISC record for "old" and/or a NEWQDISC record for "new"
 * (built-in qdiscs are skipped).  Returns the rtnetlink_send result,
 * -ENOBUFS on allocation failure, or -EINVAL if nothing was filled in.
 */
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		/* NLM_F_REPLACE marks this NEWQDISC as replacing "old". */
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	/* Falls through here when both qdiscs were ignored, too. */
	kfree_skb(skb);
	return -EINVAL;
}
1422
/* Dump "root" and every qdisc on its ->list into "skb" for a netlink
 * dump, skipping the first s_q_idx entries (resume point from a prior
 * partial dump).  *q_idx_p is advanced past what was emitted; returns
 * 0 on completion or -1 when the skb filled up mid-dump.
 */
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	/* The root itself is entry 0, the list holds the rest. */
	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
1462
/* Netlink dump handler for RTM_GETQDISC: walk all devices in the
 * namespace, dumping each device's egress root hierarchy and ingress
 * qdisc.  cb->args[0]/[1] store the device and qdisc resume indices
 * across partial dumps.
 */
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	/* Resume position from the previous dump chunk. */
	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		/* Past the resume device: start its qdiscs from 0. */
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1503
1504
1505
1506/************************************************
1507 * Traffic classes manipulation. *
1508 ************************************************/
1509
1510
1511
/* RTM_{NEW,DEL,GET}TCLASS handler: create, change, delete or report a
 * traffic class on a qdisc.  Resolves the (possibly partial) qdisc and
 * class handles from the tcmsg header, then dispatches on nlmsg_type.
 */
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	/* Only RTM_GETTCLASS is allowed without CAP_NET_ADMIN. */
	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT - class is root, which has no parent.
	   parent == X:0 - parent is root class.
	   parent == X:Y - parent is a node in hierarchy.
	   parent == 0:Y - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0 - generate handle from kernel pool.
	   handle == 0:Y - class is X:Y, where X:0 is qdisc.
	   handle == X:Y - clear.
	   handle == X:0 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	/* NB: "portid" here temporarily holds tcm_parent (a class handle),
	 * not a netlink port id; it is completed to a full X:Y handle below.
	 */
	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	/* cops->get() takes a reference that is dropped via cops->put()
	 * at "out:" below.
	 */
	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		/* Class does not exist: only a NEW request with NLM_F_CREATE
		 * may fall through to cops->change() and create it.
		 */
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* Create or reconfigure the class and broadcast the result. */
	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
1643
1644
/* Fill one RTM_*TCLASS netlink message describing class @cl of qdisc @q
 * into @skb.  Returns skb->len on success, -1 on failure after trimming
 * the partially written message back off the skb.
 */
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	/* Dumps may iterate many classes; give the scheduler a chance. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	/* Let the qdisc's class ops append class-specific attributes. */
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1690
Tom Goff7316ae82010-03-19 15:40:13 +00001691static int tclass_notify(struct net *net, struct sk_buff *oskb,
1692 struct nlmsghdr *n, struct Qdisc *q,
1693 unsigned long cl, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
1695 struct sk_buff *skb;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001696 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1699 if (!skb)
1700 return -ENOBUFS;
1701
Eric W. Biederman15e47302012-09-07 20:12:54 +00001702 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 kfree_skb(skb);
1704 return -EINVAL;
1705 }
1706
Eric W. Biederman15e47302012-09-07 20:12:54 +00001707 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001708 n->nlmsg_flags & NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709}
1710
/* State threaded through a class walk when dumping classes to netlink:
 * the walker itself plus the skb being filled and the dump callback.
 */
struct qdisc_dump_args {
	struct qdisc_walker w;		/* must be first: cast back from *w */
	struct sk_buff *skb;		/* message under construction */
	struct netlink_callback *cb;	/* netlink dump state */
};
1716
1717static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1718{
1719 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1720
Eric W. Biederman15e47302012-09-07 20:12:54 +00001721 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1723}
1724
/* Dump all classes of one qdisc into the netlink dump @cb.
 * @t_p counts qdiscs seen so far; qdiscs below the resume point @s_t
 * are skipped.  cb->args[1] holds the per-qdisc class resume index.
 * Returns 0 to continue, -1 when the skb is full and the dump must stop.
 */
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	/* Skip invisible qdiscs, already-dumped ones, classless qdiscs,
	 * and qdiscs not under the requested parent (if one was given).
	 */
	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	/* A new qdisc (past the resume point): reset the per-qdisc
	 * resume state stored in cb->args[1..].
	 */
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];	/* resume where the last dump stopped */
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;	/* remember progress for the next call */
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
1753
1754static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1755 struct tcmsg *tcm, struct netlink_callback *cb,
1756 int *t_p, int s_t)
1757{
1758 struct Qdisc *q;
1759
1760 if (!root)
1761 return 0;
1762
1763 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1764 return -1;
1765
1766 list_for_each_entry(q, &root->list, list) {
1767 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1768 return -1;
1769 }
1770
1771 return 0;
1772}
1773
/* RTM_GETTCLASS dump handler: dump the classes of a device's root qdisc
 * hierarchy and, if present, its ingress qdisc.  cb->args[0] carries the
 * qdisc resume index across invocations.
 */
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	/* Takes a device reference; released via dev_put() below. */
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];	/* resume point from the previous pass */
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;	/* save progress for the next pass */

	dev_put(dev);
	return skb->len;
}
1806
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 *
 * Returns the first non-negative classifier verdict, or -1 when no
 * classifier in the chain matched.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		/* Skip classifiers bound to a different protocol,
		 * unless they accept all protocols (ETH_P_ALL).
		 */
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);

		if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			/* A final verdict: clear the reclassify state. */
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
1834
/* Classify @skb against chain @tp, honouring TC_ACT_RECLASSIFY verdicts
 * (restart from the head of the chain) when CONFIG_NET_CLS_ACT is set.
 * A reclassify-loop counter in skb->tc_verd bounds the restarts; when it
 * overflows MAX_REC_LOOP the packet is shot.
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *otp = tp;	/* head of chain, for restart */
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		/* Too many restarts: assume a rule loop and drop. */
		if (verd++ >= MAX_REC_LOOP) {
			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
					       tp->q->ops->id,
					       tp->prio & 0xffff,
					       ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
/* Destroy a single classifier: let it tear down its own state, drop the
 * module reference held while it was attached, then free the object
 * after an RCU grace period (readers may still be traversing it).
 */
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}
1871
John Fastabend25d8c0d2014-09-12 20:05:27 -07001872void tcf_destroy_chain(struct tcf_proto __rcu **fl)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001873{
1874 struct tcf_proto *tp;
1875
John Fastabend25d8c0d2014-09-12 20:05:27 -07001876 while ((tp = rtnl_dereference(*fl)) != NULL) {
1877 RCU_INIT_POINTER(*fl, tp->next);
Patrick McHardya48b5a62007-03-23 11:29:43 -07001878 tcf_destroy(tp);
1879 }
1880}
1881EXPORT_SYMBOL(tcf_destroy_chain);
1882
#ifdef CONFIG_PROC_FS
/* /proc/net/psched: report the scheduler clock parameters as four
 * hex words (see the seq_printf below for the exact fields).
 */
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Per-netns setup: create /proc/net/psched for this namespace. */
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
/* Without CONFIG_PROC_FS the per-netns hooks are no-ops. */
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
1935
/* Per-network-namespace lifecycle hooks for the /proc/net/psched entry. */
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
1940
/* Packet scheduler subsystem init: register the per-netns proc hooks,
 * the built-in qdiscs, and the rtnetlink handlers for qdisc and class
 * operations.
 */
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	/* Built-in qdiscs that must always be available. */
	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	/* rtnetlink message handlers: (doit, dumpit) per message type. */
	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);