#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

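/*
 * Per-IRQ remapping state: the IOMMU that remaps this interrupt, the
 * location of its IRTE(s) in that IOMMU's interrupt-remapping table,
 * and the power-of-two mask describing how many entries were reserved.
 */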
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif

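/*
 * Map an IRQ number to its irq_2_iommu slot; returns NULL for
 * out-of-range IRQs. With a statically sized table the _alloc variant
 * has nothing to allocate and is simply the lookup.
 */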
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}

static DEFINE_SPINLOCK(irq_2_ir_lock);

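/*
 * Like irq_2_iommu(), but also requires that the slot has been bound
 * to an IOMMU; returns NULL if the IRQ is not remapped.
 */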
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

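/*
 * Copy the interrupt remap table entry (IRTE) backing @irq into
 * @entry. Returns 0 on success, -1 if the IRQ is not remapped.
 */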
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

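/*
 * Reserve @count contiguous IRTEs for @irq. @count is rounded up to a
 * power of two, and the resulting mask must not exceed what the
 * hardware's extended capability register allows. Returns the index of
 * the first reserved entry, or -1 on failure.
 */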
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}

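/*
 * Submit a queued-invalidation descriptor that flushes the interrupt
 * entry cache for 2^@mask entries starting at @index, and wait for it
 * to complete.
 */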
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

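/*
 * Return the base IRTE index for @irq and store its sub-handle in
 * @sub_handle; -1 if the IRQ is not remapped.
 */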
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}

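/*
 * Bind @irq to an IRTE block that was allocated elsewhere: record the
 * IOMMU, the base @index and the @subhandle within that block.
 */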
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);

        /* irq_2_iommu_alloc() returns NULL for out-of-range IRQs */
        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

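/*
 * Unbind @irq from its IRTE. The hardware table itself is left
 * untouched; clearing and flushing the entries is the block owner's
 * job (see free_irte()).
 */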
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

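/*
 * Rewrite the IRTE backing @irq with @irte_modified, flush the cache
 * line, and invalidate the hardware's interrupt entry cache for that
 * index so the update takes effect.
 */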
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

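/*
 * Invalidate the interrupt entry cache for all IRTEs reserved for
 * @irq, without modifying the entries themselves.
 */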
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}

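/* Find the IOMMU that remaps interrupts for the given IO-APIC id. */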
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

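/* Find the IOMMU (DRHD unit) that covers @dev. */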
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

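/*
 * Release the IRTEs reserved for @irq. Only the block owner
 * (sub_handle == 0) clears the hardware entries and flushes the
 * interrupt entry cache; sub-handle users just drop their bookkeeping.
 */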
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                /* clear each entry in the reserved block, not just the first */
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

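/*
 * Point the hardware at the interrupt-remapping table, globally
 * invalidate the interrupt entry cache, then enable
 * interrupt-remapping.
 */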
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    addr | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

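/*
 * Allocate the interrupt-remapping table descriptor and table pages
 * for @iommu and program them into the hardware. Returns 0 on success
 * or -ENOMEM.
 */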
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

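/*
 * Enable interrupt-remapping on all capable DRHD units: check
 * (extended) interrupt-remapping support, enable queued invalidation
 * everywhere, then set up a remapping table per unit. @eim selects
 * x2APIC (extended interrupt) mode. Returns 0 on success, -1 on
 * failure.
 */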
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

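/*
 * Walk the device scope entries of a DRHD and record, by enumeration
 * id, which IO-APICs are covered by @iommu.
 */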
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between each IO-APIC and its interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}