/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
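/*
 * Note: consumers do not call these routines directly; copies are requested
 * through the generic dmaengine/async_tx API, which dispatches to the
 * device_prep_dma_memcpy/device_issue_pending operations registered in
 * ioat1_dma_probe() below.
 */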

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       work_func_t work_fn, void (*tasklet)(unsigned long),
		       unsigned long tasklet_data)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
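	/* each channel owns a 0x80-byte register bank; bank 0 holds the
	 * device-global registers, hence the (idx + 1) below */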
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	INIT_DELAYED_WORK(&chan->work, work_fn);
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
	tasklet_disable(&chan->cleanup_task);
}

static void ioat1_reset_part2(struct work_struct *work);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
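	/* XFERCAP encodes the per-descriptor transfer limit as a power of
	 * two; a scale of 0 is treated as "no limit" (all ones) */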
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_reset_part2,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
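	/* poke the APPEND doorbell so the channel picks up descriptors
	 * chained on after it last fetched the chain */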
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_part2 - reinit the channel after a reset
 */
static void ioat1_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat_dma_chan *ioat;
	struct ioat_desc_sw *desc;
	int dmacount;
	bool start_null = false;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat_dma_chan, base);
	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->desc_lock);

	chan->completion_virt->low = 0;
	chan->completion_virt->high = 0;
	ioat->pending = 0;

	/* count the descriptors waiting */
	dmacount = 0;
	if (ioat->used_desc.prev) {
		desc = to_ioat_desc(ioat->used_desc.prev);
		do {
			dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat->used_desc.next);
	}

	if (dmacount) {
		/*
		 * write the new starting descriptor address
		 * this puts channel engine into ARMED state
		 */
		desc = to_ioat_desc(ioat->used_desc.prev);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, chan->reg_base
			+ IOAT_CHANCMD_OFFSET(chan->device->version));
	} else
		start_null = true;
	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_err(to_dev(chan),
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(chan), dmacount, ioat->desccount);

	if (start_null)
		ioat1_dma_start_null_desc(ioat);
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	if (!ioat->used_desc.prev)
		return;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = (chan->completion_virt->low
			& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat->desc_lock);
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&chan->work, RESET_DELAY);
}

/**
 * ioat1_chan_watchdog - watch for stuck channels
 */
static void ioat1_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat;
	struct ioat_chan_common *chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat_dma_chan, base);

		if (/* have we started processing anything yet */
		    chan->last_completion
		    /* have we completed any since last watchdog cycle? */
		    && (chan->last_completion == chan->watchdog_completion)
		    /* has TCP stuck on one cookie since last watchdog? */
		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
		    /* is there something in the chain to be processed? */
		    /* CB1 chain always has at least the last one processed */
		    && (ioat->used_desc.prev != ioat->used_desc.next)
		    && ioat->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it is not zero
			 * and it has changed since the last watchdog
			 * we can assume that channel
			 * is still working correctly
			 * and the problem is in completion writeback.
			 * update completion writeback
			 * with actual CHANSTS value
			 * else
			 * try resetting the channel
			 */

			completion_hw.low = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
			completion_hw.high = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != chan->watchdog_completion)
			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				chan->completion_virt->low = completion_hw.low;
				chan->completion_virt->high = completion_hw.high;
			} else {
				ioat1_reset_channel(ioat);
				chan->watchdog_completion = 0;
				chan->last_compl_desc_addr_hw = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}

		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

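	/* batch submissions: only ring the doorbell once ioat_pending_level
	 * descriptors have accumulated (or on an explicit issue_pending) */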
	ioat->pending += desc->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion_virt = pci_pool_alloc(chan->device->completion_pool,
					       GFP_KERNEL,
					       &chan->completion_addr);
	memset(chan->completion_virt, 0,
	       sizeof(*chan->completion_virt));
	writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_addr) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion_virt,
		      chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = chan->completion_addr = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
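	/* split the copy into hardware descriptors of at most ->xfercap
	 * bytes each, chaining them into a single transaction */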
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->tx_cnt = tx_cnt;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}


void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

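	/* hw is the last descriptor of the transaction, so its addresses
	 * point at the final chunk; subtracting offset (len - hw->size)
	 * recovers the base addresses of the whole buffer for unmapping */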
	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if ((chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion_virt);

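	/* runs from the cleanup tasklet and other opportunistic callers; if
	 * another context already holds the cleanup lock, give up rather
	 * than spin - a later cleanup pass will catch up */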
	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat1_chan_watchdog(&(chan->device->work.work));
			chan->last_completion_time = jiffies;
		}
		return;
	}
	chan->last_completion_time = jiffies;

	cookie = 0;
	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			cookie = tx->cookie;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
			else
				tx->cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it, but don't look at it next
			 * time, either
			 */
			tx->cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

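/*
 * ioat1_dma_start_null_desc - prime the channel with a no-op descriptor
 *
 * A null descriptor gives the hardware a valid chain head to fetch from, so
 * that real memcpy descriptors can later be appended behind it (see the
 * "give chain to dma device" call in ioat1_dma_alloc_chan_resources()).
 */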
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, chan->reg_base
		+ IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

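	/* honor the requested style, falling back msix -> msix-single-vector
	 * -> msi -> intx if a mode cannot be enabled */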
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}