/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

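/* __ioat2_issue_pending - notify the hardware of newly staged descriptors
 * by writing the updated count to the channel's DMACOUNT register;
 * called with prep_lock held
 */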
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

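/* __ioat2_start_null_desc - post a NULL descriptor (no data movement,
 * completion write enabled) to (re)start the channel at a known ring
 * position; called with prep_lock held
 */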
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

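/* __cleanup - reclaim finished descriptors from ->tail up to the last
 * completion address written back by hardware (phys_complete), running
 * callbacks and unmapping buffers; called with cleanup_lock held
 */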
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

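/* ioat2_cleanup_event - tasklet body: reap completed descriptors, then
 * rewrite IOAT_CHANCTRL_RUN to restore the channel control settings
 */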
void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

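/* __ioat2_restart_chan - resume a quiesced channel: rewind ->issued to
 * ->tail, point the hardware chain address at the first unsubmitted
 * descriptor (or a NULL descriptor if the ring is empty) and re-issue
 */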
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

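/* ioat2_quiesce - suspend the channel and spin until it stops; a zero
 * @tmo waits indefinitely, otherwise -ETIMEDOUT is returned on expiry
 */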
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

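/* ioat2_reset_sync - issue a channel reset and spin until the reset
 * bit clears or the timeout expires
 */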
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

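/* ioat2_timer_event - watchdog: while completions are pending, check for
 * a halted channel and restart it if no progress has been made since the
 * last acknowledged timeout; when idle, opportunistically shrink an
 * oversized ring back toward the default allocation order
 */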
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;
		u64 status;

		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			if (test_bit(IOAT_RUN, &chan->state))
				BUG_ON(is_ioat_bug(chanerr));
			else /* we never got off the ground */
				return;
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (ioat_cleanup_preamble(chan, &phys_complete)) {
			__cleanup(ioat, phys_complete);
		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
			spin_lock_bh(&ioat->prep_lock);
			ioat2_restart_channel(ioat);
			spin_unlock_bh(&ioat->prep_lock);
		} else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&chan->cleanup_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&chan->cleanup_lock);
		spin_lock_bh(&ioat->prep_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

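/* ioat2_tx_submit_unlock - assign a cookie and publish the descriptors
 * staged by the prep routine (advance ->head by ->produce), dropping the
 * prep_lock acquired in ioat2_check_space_lock
 */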
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		set_bit(IOAT_RUN, &chan->state);
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

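/* reshape_ring - grow or shrink the ring to 2^order entries while
 * preserving the currently active descriptors; called with cleanup_lock
 * and prep_lock held, returns false if the resize cannot be done
 * (order too large, no free slot, or allocation failure)
 */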
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

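/* ioat2_dma_prep_memcpy_lock - fill one or more ring descriptors for a
 * memcpy of @len bytes, splitting at the channel's maximum transfer
 * size (1 << xfercap_log); returns with prep_lock held so that
 * submission stays in order
 */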
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);
	clear_bit(IOAT_RUN, &chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

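/* sysfs attributes reporting the current ring size and number of
 * active descriptors for each channel
 */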
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

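/* ioat2_dma_probe - wire up the ioat2-specific channel operations,
 * register the dmaengine device and sysfs entries, and optionally
 * initialize DCA support
 */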
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}