Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--   drivers/net/cxgb3/sge.c   45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab..6990c0ddc85 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1704,16 +1704,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
*/
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
- skb->next = skb->prev = NULL;
- if (q->rx_tail)
- q->rx_tail->next = skb;
- else {
+ int was_empty = skb_queue_empty(&q->rx_queue);
+
+ __skb_queue_tail(&q->rx_queue, skb);
+
+ if (was_empty) {
struct sge_qset *qs = rspq_to_qset(q);
napi_schedule(&qs->napi);
- q->rx_head = skb;
}
- q->rx_tail = skb;
}
/**
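The hunk above replaces the driver's open-coded rx_head/rx_tail singly linked
list with the generic struct sk_buff_head and its unlocked __skb_queue_tail()
helper; serialisation still comes from q->lock, which the caller already holds,
and NAPI is only scheduled when the queue goes from empty to non-empty. A
minimal sketch of that pattern, with made-up names (enqueue_locked, needs_kick)
that are not part of the patch:

#include <linux/skbuff.h>

/*
 * Illustrative only: the "__" queue helpers take no lock themselves, so this
 * must be called with the lock that protects rxq already held (q->lock in the
 * driver).  The return value tells the caller whether to schedule NAPI.
 */
static bool enqueue_locked(struct sk_buff_head *rxq, struct sk_buff *skb)
{
        bool needs_kick = skb_queue_empty(rxq); /* true on empty -> non-empty */

        __skb_queue_tail(rxq, skb);             /* unlocked variant; caller holds the lock */

        return needs_kick;                      /* caller kicks NAPI only on the transition */
}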
@@ -1754,26 +1753,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
int work_done = 0;
while (work_done < budget) {
- struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
int ngathered;
spin_lock_irq(&q->lock);
- head = q->rx_head;
- if (!head) {
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
napi_complete(napi);
spin_unlock_irq(&q->lock);
return work_done;
}
-
- tail = q->rx_tail;
- q->rx_head = q->rx_tail = NULL;
spin_unlock_irq(&q->lock);
- for (ngathered = 0; work_done < budget && head; work_done++) {
- prefetch(head->data);
- skbs[ngathered] = head;
- head = head->next;
- skbs[ngathered]->next = NULL;
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1783,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
ngathered = 0;
}
}
- if (head) { /* splice remaining packets back onto Rx queue */
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
spin_lock_irq(&q->lock);
- tail->next = q->rx_head;
- if (!q->rx_head)
- q->rx_tail = tail;
- q->rx_head = head;
+ skb_queue_splice(&queue, &q->rx_queue);
spin_unlock_irq(&q->lock);
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
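The two ofld_poll() hunks above convert the drain loop to the same helpers:
the pending skbs are spliced onto an on-stack queue under q->lock with
skb_queue_splice_init(), processed without the lock via
skb_queue_walk_safe()/__skb_unlink(), and anything left when the NAPI budget
runs out is spliced back onto q->rx_queue with skb_queue_splice(). A rough,
self-contained sketch of that shape (drain_up_to and handle are illustrative
names, not from the driver):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int drain_up_to(struct sk_buff_head *rxq, spinlock_t *lock, int budget,
                       void (*handle)(struct sk_buff *))
{
        struct sk_buff_head local;
        struct sk_buff *skb, *tmp;
        int done = 0;

        __skb_queue_head_init(&local);          /* on-stack list: no lock of its own needed */

        spin_lock_irq(lock);
        skb_queue_splice_init(rxq, &local);     /* grab everything, leave rxq empty */
        spin_unlock_irq(lock);

        skb_queue_walk_safe(&local, skb, tmp) { /* _safe: entries are unlinked while walking */
                if (done >= budget)
                        break;
                __skb_unlink(skb, &local);
                handle(skb);
                done++;
        }

        if (!skb_queue_empty(&local)) {         /* out of budget: put the rest back */
                spin_lock_irq(lock);
                skb_queue_splice(&local, rxq);  /* leftovers rejoin at the head */
                spin_unlock_irq(lock);
        }
        return done;
}

Because skb_queue_splice() joins the leftover list at the head of the
destination, packets that were not processed keep their place ahead of anything
queued in the meantime, which is the same ordering the old tail->next splice
preserved.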
@@ -2934,6 +2934,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->rspq.gen = 1;
q->rspq.size = p->rspq_size;
spin_lock_init(&q->rspq.lock);
+ skb_queue_head_init(&q->rspq.rx_queue);
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
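The final hunk initialises the new queue when the queue set is allocated.
skb_queue_head_init() also sets up the list's embedded spinlock, which suits
the long-lived q->rspq.rx_queue, while the short-lived on-stack queue in
ofld_poll() only needs __skb_queue_head_init(). A tiny illustration of the
difference (init_queues_example is a made-up name):

#include <linux/skbuff.h>

/* Illustrative only: the two initialisers used by this patch. */
static void init_queues_example(struct sk_buff_head *persistent)
{
        struct sk_buff_head scratch;

        skb_queue_head_init(persistent);        /* long-lived queue: list + embedded spinlock */
        __skb_queue_head_init(&scratch);        /* on-stack scratch list: no lock init */
}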