/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame size,
	 * so the requested bytes are available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

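/*
 * Map the TX frame of this entry for DMA towards the device and store
 * the resulting bus address in the skb frame descriptor. Returns
 * -ENOMEM when the mapping fails, in which case the caller must drop
 * the frame.
 */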
int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

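/*
 * Undo a previous RX or TX DMA mapping for the skb of this entry; the
 * SKBDESC_DMA_MAPPED_* flags record which direction was mapped.
 */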
void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

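/*
 * Move the complete frame so that skb->data starts on a 4-byte
 * boundary; ALIGN_SIZE() reports by how many bytes it is misaligned.
 */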
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

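/*
 * Insert L2 padding: align the 802.11 header and the payload each on a
 * 4-byte boundary, with L2PAD_SIZE(header_length) padding bytes in
 * between, as expected by devices with the REQUIRE_L2PAD capability.
 */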
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

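/*
 * Fill in the sequence number handling for this frame: either let the
 * hardware generate the seqno or assign one in software from
 * intf->seqno, depending on the REQUIRE_SW_SEQNO capability and on
 * whether QoS is disabled.
 */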
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS)
		 * frames. To work around the problem, generate the seqno
		 * in software if QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

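/*
 * Fill in the PLCP portion of the TX descriptor (signal, service and
 * length fields) for legacy CCK/OFDM rates.
 */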
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

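/*
 * Fill in the HT portion of the TX descriptor: MCS index, WCID,
 * MPDU density, BA size, STBC, AMPDU eligibility, channel width,
 * guard interval and TXOP handling.
 */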
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (sta) {
		txdesc->u.ht.mpdu_density =
		    sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    ((sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings zeroed. */
		return;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

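/*
 * Build the complete TX descriptor from the mac80211 TX control
 * information. This must happen before the skb->cb area is reused for
 * the skb frame descriptor, since tx_info lives in skb->cb.
 */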
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen: we already checked that the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

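/*
 * If this frame is a BlockAckReq, remember it on rt2x00dev->bar_list so
 * that the RX path can match the corresponding BlockAck against it.
 */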
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->ops->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out, but just don't track
	 * it in our BAR list. As a result we will report it back to mac80211
	 * as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list,
	 * such that we can use RCU for low overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

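/*
 * Main TX entry point for queueing one frame: create the TX descriptor,
 * strip the IV/EIV when needed, align or L2-pad the frame, claim a free
 * queue entry, hand the frame to the driver and finally kick the queue.
 */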
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment alone is valid
	 * only for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bottom halves (bh) disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				bool (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over:
	 * if during our loop an extra entry is set to pending,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i]))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

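/*
 * Advance one of the queue index pointers (Q_INDEX, Q_INDEX_DONE, ...)
 * with wrap-around, and update the queue length/count bookkeeping.
 */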
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

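/*
 * Allocate the queue entries and the driver private data for each entry
 * in a single allocation. A rough sketch of the resulting layout:
 *
 *   [ entry 0 | entry 1 | ... | entry N-1 | priv 0 | priv 1 | ... | priv N-1 ]
 *
 * QUEUE_ENTRY_PRIV_OFFSET() below points entry i's priv_data at "priv i".
 */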
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

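/*
 * Allocate the data_queue array for this device. The layout of the
 * array set up below is:
 *
 *   queue[0]                RX queue
 *   queue[1..tx_queues]     TX queues
 *   queue[1 + tx_queues]    beacon queue
 *   queue[2 + tx_queues]    ATIM queue (only when required)
 */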
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}