author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-06 12:30:19 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-06 12:30:19 -0800
commit	abb359450f20c32ae03039d8736f12b1d561caf5 (patch)
tree	6e8723885feb66a138f19f0ff31615dc13a8d859 /net/sched/sch_generic.c
parent	cb600d2f83c854ec3d6660063e4466431999489b (diff)
parent	4e3dbdb1392a83bd21a6ff8f6bc785495058d37c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1436 commits)
  cassini: Use local-mac-address prom property for Cassini MAC address
  net: remove the duplicate #ifdef __KERNEL__
  net: bridge: check the length of skb after nf_bridge_maybe_copy_header()
  netconsole: clarify stopping message
  netconsole: don't announce stopping if nothing happened
  cnic: Fix the type field in SPQ messages
  netfilter: fix export secctx error handling
  netfilter: fix the race when initializing nf_ct_expect_hash_rnd
  ipv4: IP defragmentation must be ECN aware
  net: r6040: Return proper error for r6040_init_one
  dcb: use after free in dcb_flushapp()
  dcb: unlock on error in dcbnl_ieee_get()
  net: ixp4xx_eth: Return proper error for eth_init_one
  include/linux/if_ether.h: Add #define ETH_P_LINK_CTL for HPNA and wlan local tunnel
  net: add POLLPRI to sock_def_readable()
  af_unix: Avoid socket->sk NULL OOPS in stream connect security hooks.
  net_sched: pfifo_head_drop problem
  mac80211: remove stray extern
  mac80211: implement off-channel TX using hw r-o-c offload
  mac80211: implement hardware offload for remain-on-channel
  ...
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e5..34dc598440a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc_node(size, GFP_KERNEL,
+			 netdev_queue_numa_node_read(dev_queue));
+
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	if (dev_ingress_queue(dev))
-		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+	struct net_device *dev;
+
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
 
-	dev_watchdog_down(dev);
+		dev_watchdog_down(dev);
+	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
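
The repeated pattern replaced at three call sites in this file, !netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq), is the negation of "stopped or frozen", so the new predicate can be read as the sketch below. This is a minimal equivalent written against the two older accessors that the diff removes from the call sites, not the mainline definition, which presumably folds both tests into a single queue-state check.

/* Minimal sketch, assuming only the two accessors visible in the removed
 * lines above: true when the tx queue is either stopped or frozen, i.e.
 * exactly when the old compound test would have skipped transmission. */
static inline bool netif_tx_queue_frozen_or_stopped(const struct netdev_queue *txq)
{
	return netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq);
}

The other notable change is dev_deactivate_many(): the per-device work moves inside a loop over head->unreg_list while the synchronize_rcu() wait stays outside it, so a caller deactivating several devices pays that grace-period cost once per batch instead of once per device, and dev_deactivate() remains as a wrapper that passes a single-entry list.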