/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
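
/*
 * Illustrative example (not part of the original code): with the default
 * 48-bit guest address width and 4KiB VT-d pages, __DOMAIN_MAX_PFN(48) is
 * (1ULL << 36) - 1, which fits comfortably in an unsigned long on 64-bit.
 * On a 32-bit kernel the min_t() above clamps DOMAIN_MAX_PFN to ULONG_MAX
 * instead, which is what lets the rest of the driver use 'unsigned long'
 * for PFNs.
 */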

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
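
/*
 * Illustrative note: ~0xFFFUL clears bits 0-11 and sets every bit from 12
 * upwards, so the advertised sizes are 4KiB, 8KiB, 16KiB, ... i.e. every
 * power-of-two multiple of 4KiB, matching the comment above.
 */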

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
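
/*
 * Example of the mm<->dma pfn conversions above (illustrative): in the
 * common case PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the shifts are by
 * zero and mm_to_dma_pfn()/dma_to_mm_pfn() are identity mappings.  With
 * 64KiB kernel pages (PAGE_SHIFT == 16) one mm pfn would correspond to
 * sixteen 4KiB DMA pfns, which is why VT-d pages must never be larger
 * than MM pages.
 */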

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))


/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
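
/*
 * Illustrative (compiled-out) sketch of how the setters above are meant
 * to compose a context entry: domain id and address width go in the high
 * word, the low word is pointed at the page-table root with a translation
 * type, and the entry is marked present last.  This helper is not part of
 * the driver; the real composition happens when a device is attached to a
 * domain.
 */
#if 0
static void example_compose_context(struct context_entry *c, u16 did,
				    int agaw, unsigned long pgd_phys)
{
	context_clear_entry(c);
	context_set_domain_id(c, did);
	context_set_address_width(c, agaw);
	context_set_address_root(c, pgd_phys);
	context_set_translation_type(c, 0);	/* 0 selects multi-level translation */
	context_set_fault_enable(c);
	context_set_present(c);
}
#endif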

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
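
/*
 * Example (illustrative): booting with "intel_iommu=on,strict,sp_off"
 * walks the option string above one comma-separated token at a time,
 * enabling the IOMMU, switching to strict (unbatched) IOTLB flushing and
 * disabling superpage support.
 */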

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
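
/*
 * Worked example (illustrative): an IOMMU whose SAGAW capability
 * advertises 4-level tables has bit 2 set in cap_sagaw().  For the
 * default 48-bit domain width, width_to_agaw(48) == 2, so the loop in
 * __iommu_calculate_agaw() succeeds on its first test_bit() and
 * iommu_calculate_agaw() returns 2, i.e. agaw_to_level(2) == 4 levels.
 */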

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
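
/*
 * Illustrative example of the extended-context layout handled above:
 * with ECS enabled each context table covers only 128 devfns, so devfn
 * 0x83 is looked up as entry (0x83 - 0x80) * 2 == 6 in the table hanging
 * off root->hi, while devfn 0x03 stays in the table referenced by
 * root->lo.
 */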

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
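
/*
 * Illustrative walk: for a 4-level domain (agaw == 2), a lookup with
 * *target_level == 1 starts at level 4 and steps down through
 * pfn_level_offset(pfn, 4..1), allocating any missing intermediate
 * page-table pages with alloc_pgtable_page() on the way; callers asking
 * for a superpage pass a larger *target_level and stop earlier.
 */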


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
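
/*
 * Illustrative (compiled-out) sketch of the intended lifecycle of the
 * freelist returned by domain_unmap(): page-table pages are only handed
 * back to the allocator after the IOTLB has been flushed, so the hardware
 * can no longer be walking them.  The flush call here stands in for
 * whichever invalidation the real caller issues.
 */
#if 0
static void example_unmap_and_free(struct dmar_domain *domain,
				   struct intel_iommu *iommu,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	struct page *freelist;

	freelist = domain_unmap(domain, start_pfn, last_pfn);
	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      last_pfn - start_pfn + 1, 0, 0);
	dma_free_pagelist(freelist);
}
#endif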

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
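
/*
 * Example of the PSI encoding above (illustrative): invalidating sixteen
 * 4KiB pages at DMA pfn 0x1000 uses size_order == 4 and
 * addr == 0x1000 << VTD_PAGE_SHIFT, so val_iva carries the page-aligned
 * address with the order (and any IH hint) in its low bits, as the VT-d
 * IOTLB Invalidate Address register expects.
 */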
1279
David Woodhouse64ae8922014-03-09 12:52:30 -07001280static struct device_domain_info *
1281iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1282 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001284 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001285 unsigned long flags;
1286 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001287 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001288
1289 if (!ecap_dev_iotlb_support(iommu->ecap))
1290 return NULL;
1291
1292 if (!iommu->qi)
1293 return NULL;
1294
1295 spin_lock_irqsave(&device_domain_lock, flags);
1296 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001297 if (info->iommu == iommu && info->bus == bus &&
1298 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001299 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001300 break;
1301 }
1302 spin_unlock_irqrestore(&device_domain_lock, flags);
1303
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001304 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001305 return NULL;
1306
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001307 pdev = to_pci_dev(info->dev);
1308
1309 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001310 return NULL;
1311
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001312 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001313 return NULL;
1314
Yu Zhao93a23a72009-05-18 13:51:37 +08001315 return info;
1316}
1317
1318static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1319{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001320 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001321 return;
1322
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001323 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001324}
1325
1326static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1327{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001328 if (!info->dev || !dev_is_pci(info->dev) ||
1329 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001330 return;
1331
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001332 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001333}
1334
1335static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1336 u64 addr, unsigned mask)
1337{
1338 u16 sid, qdep;
1339 unsigned long flags;
1340 struct device_domain_info *info;
1341
1342 spin_lock_irqsave(&device_domain_lock, flags);
1343 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001344 struct pci_dev *pdev;
1345 if (!info->dev || !dev_is_pci(info->dev))
1346 continue;
1347
1348 pdev = to_pci_dev(info->dev);
1349 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001350 continue;
1351
1352 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001353 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001354 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1355 }
1356 spin_unlock_irqrestore(&device_domain_lock, flags);
1357}
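/*
 * A note on the loop above: only devices with ATS enabled have a device-side
 * IOTLB, so everything else is skipped.  The requester id is rebuilt as
 * (bus << 8) | devfn (e.g. bus 0x03, devfn 0x00 gives sid 0x0300), and the
 * device's ATS invalidate queue depth is passed along so the queued
 * invalidation code can size the request for that device.
 */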
1358
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001359static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001360 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001361{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001362 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001363 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001364
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365 BUG_ON(pages == 0);
1366
David Woodhouseea8ea462014-03-05 17:09:32 +00001367 if (ih)
1368 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001369 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001370	 * Fall back to a domain-selective flush if there is no PSI support or
1371	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001372	 * PSI requires the page size to be 2 ^ x, and the base address to be
1373	 * naturally aligned to the size
1374 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001375 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1376 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001377 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001378 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001379 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001380 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001381
1382 /*
Nadav Amit82653632010-04-01 13:24:40 +03001383 * In caching mode, changes of pages from non-present to present require
1384	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001385 */
Nadav Amit82653632010-04-01 13:24:40 +03001386 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001387 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001388}
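/*
 * A note on the mask computation above: the page-selective invalidation can
 * only express power-of-two ranges, so the request is rounded up.  For
 * example (illustrative), pages = 3 gives __roundup_pow_of_two(3) = 4 and
 * mask = ilog2(4) = 2, i.e. a 4-page (16KiB) flush.
 */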
1389
mark grossf8bab732008-02-08 04:18:38 -08001390static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1391{
1392 u32 pmen;
1393 unsigned long flags;
1394
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001395 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001396 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1397 pmen &= ~DMA_PMEN_EPM;
1398 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1399
1400 /* wait for the protected region status bit to clear */
1401 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1402 readl, !(pmen & DMA_PMEN_PRS), pmen);
1403
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001404 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001405}
1406
Jiang Liu2a41cce2014-07-11 14:19:33 +08001407static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408{
1409 u32 sts;
1410 unsigned long flags;
1411
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001412 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001413 iommu->gcmd |= DMA_GCMD_TE;
1414 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001415
1416	/* Make sure hardware completes it */
1417 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001418 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001420 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421}
1422
Jiang Liu2a41cce2014-07-11 14:19:33 +08001423static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424{
1425 u32 sts;
1426 unsigned long flag;
1427
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001428 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429 iommu->gcmd &= ~DMA_GCMD_TE;
1430 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1431
1432	/* Make sure hardware completes it */
1433 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001434 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001435
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001436 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437}
1438
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001439
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440static int iommu_init_domains(struct intel_iommu *iommu)
1441{
1442 unsigned long ndomains;
1443 unsigned long nlongs;
1444
1445 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001446 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1447 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 nlongs = BITS_TO_LONGS(ndomains);
1449
Donald Dutile94a91b52009-08-20 16:51:34 -04001450 spin_lock_init(&iommu->lock);
1451
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001452	/* TBD: there might be 64K domains;
1453	 * consider a different allocation scheme for future chips
1454 */
1455 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1456 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001457 pr_err("IOMMU%d: allocating domain id array failed\n",
1458 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001459 return -ENOMEM;
1460 }
1461 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1462 GFP_KERNEL);
1463 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001464 pr_err("IOMMU%d: allocating domain array failed\n",
1465 iommu->seq_id);
1466 kfree(iommu->domain_ids);
1467 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001468 return -ENOMEM;
1469 }
1470
1471 /*
1472	 * If caching mode is set, then invalid translations are tagged
1473	 * with domain id 0, so we need to pre-allocate it.
1474 */
1475 if (cap_caching_mode(iommu->cap))
1476 set_bit(0, iommu->domain_ids);
1477 return 0;
1478}
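/*
 * Sizing note for the allocations above: domain_ids is a bitmap with one bit
 * per supported domain and domains is a parallel array of pointers indexed
 * by domain id.  As an example, with cap_ndoms() == 256 a 64-bit kernel
 * needs BITS_TO_LONGS(256) == 4 longs for the bitmap plus 256 pointer slots.
 */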
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001479
Jiang Liuffebeb42014-11-09 22:48:02 +08001480static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481{
1482 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001483 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001484
Donald Dutile94a91b52009-08-20 16:51:34 -04001485 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001486 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001487 /*
1488 * Domain id 0 is reserved for invalid translation
1489 * if hardware supports caching mode.
1490 */
1491 if (cap_caching_mode(iommu->cap) && i == 0)
1492 continue;
1493
Donald Dutile94a91b52009-08-20 16:51:34 -04001494 domain = iommu->domains[i];
1495 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001496 if (domain_detach_iommu(domain, iommu) == 0 &&
1497 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001498 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001499 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500 }
1501
1502 if (iommu->gcmd & DMA_GCMD_TE)
1503 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001504}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001505
Jiang Liuffebeb42014-11-09 22:48:02 +08001506static void free_dmar_iommu(struct intel_iommu *iommu)
1507{
1508 if ((iommu->domains) && (iommu->domain_ids)) {
1509 kfree(iommu->domains);
1510 kfree(iommu->domain_ids);
1511 iommu->domains = NULL;
1512 iommu->domain_ids = NULL;
1513 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514
Weidong Hand9630fe2008-12-08 11:06:32 +08001515 g_iommus[iommu->seq_id] = NULL;
1516
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517 /* free context mapping */
1518 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519}
1520
Jiang Liuab8dfe22014-07-11 14:19:27 +08001521static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001523	/* domain id for a virtual machine; it won't be set in the context entry */
1524 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001526
1527 domain = alloc_domain_mem();
1528 if (!domain)
1529 return NULL;
1530
Jiang Liuab8dfe22014-07-11 14:19:27 +08001531 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001532 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001533 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001534 spin_lock_init(&domain->iommu_lock);
1535 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001536 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001537 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538
1539 return domain;
1540}
1541
Jiang Liufb170fb2014-07-11 14:19:28 +08001542static int __iommu_attach_domain(struct dmar_domain *domain,
1543 struct intel_iommu *iommu)
1544{
1545 int num;
1546 unsigned long ndomains;
1547
1548 ndomains = cap_ndoms(iommu->cap);
1549 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1550 if (num < ndomains) {
1551 set_bit(num, iommu->domain_ids);
1552 iommu->domains[num] = domain;
1553 } else {
1554 num = -ENOSPC;
1555 }
1556
1557 return num;
1558}
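/*
 * A note on the helper above: domain id allocation is a first-fit search of
 * the per-iommu domain_ids bitmap (callers hold iommu->lock), and the number
 * it returns doubles as the index into iommu->domains[] and as the domain id
 * written into context entries on this iommu.
 */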
1559
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001560static int iommu_attach_domain(struct dmar_domain *domain,
1561 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001563 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001564 unsigned long flags;
1565
Weidong Han8c11e792008-12-08 15:29:22 +08001566 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001567 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001568 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001569 if (num < 0)
1570 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001571
Jiang Liufb170fb2014-07-11 14:19:28 +08001572 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001573}
1574
Jiang Liu44bde612014-07-11 14:19:29 +08001575static int iommu_attach_vm_domain(struct dmar_domain *domain,
1576 struct intel_iommu *iommu)
1577{
1578 int num;
1579 unsigned long ndomains;
1580
1581 ndomains = cap_ndoms(iommu->cap);
1582 for_each_set_bit(num, iommu->domain_ids, ndomains)
1583 if (iommu->domains[num] == domain)
1584 return num;
1585
1586 return __iommu_attach_domain(domain, iommu);
1587}
1588
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001589static void iommu_detach_domain(struct dmar_domain *domain,
1590 struct intel_iommu *iommu)
1591{
1592 unsigned long flags;
1593 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001594
1595 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001596 if (domain_type_is_vm_or_si(domain)) {
1597 ndomains = cap_ndoms(iommu->cap);
1598 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1599 if (iommu->domains[num] == domain) {
1600 clear_bit(num, iommu->domain_ids);
1601 iommu->domains[num] = NULL;
1602 break;
1603 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001604 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001605 } else {
1606 clear_bit(domain->id, iommu->domain_ids);
1607 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001608 }
Weidong Han8c11e792008-12-08 15:29:22 +08001609 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001610}
1611
Jiang Liufb170fb2014-07-11 14:19:28 +08001612static void domain_attach_iommu(struct dmar_domain *domain,
1613 struct intel_iommu *iommu)
1614{
1615 unsigned long flags;
1616
1617 spin_lock_irqsave(&domain->iommu_lock, flags);
1618 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1619 domain->iommu_count++;
1620 if (domain->iommu_count == 1)
1621 domain->nid = iommu->node;
1622 domain_update_iommu_cap(domain);
1623 }
1624 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1625}
1626
1627static int domain_detach_iommu(struct dmar_domain *domain,
1628 struct intel_iommu *iommu)
1629{
1630 unsigned long flags;
1631 int count = INT_MAX;
1632
1633 spin_lock_irqsave(&domain->iommu_lock, flags);
1634 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1635 count = --domain->iommu_count;
1636 domain_update_iommu_cap(domain);
1637 }
1638 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1639
1640 return count;
1641}
1642
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001643static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001644static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
Joseph Cihula51a63e62011-03-21 11:04:24 -07001646static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001647{
1648 struct pci_dev *pdev = NULL;
1649 struct iova *iova;
1650 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001652 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1653 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001654
Mark Gross8a443df2008-03-04 14:59:31 -08001655 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1656 &reserved_rbtree_key);
1657
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658 /* IOAPIC ranges shouldn't be accessed by DMA */
1659 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1660 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001661 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001662 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001663 return -ENODEV;
1664 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665
1666 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1667 for_each_pci_dev(pdev) {
1668 struct resource *r;
1669
1670 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1671 r = &pdev->resource[i];
1672 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1673 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001674 iova = reserve_iova(&reserved_iova_list,
1675 IOVA_PFN(r->start),
1676 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001677 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001679 return -ENODEV;
1680 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001681 }
1682 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001683 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001684}
1685
1686static void domain_reserve_special_ranges(struct dmar_domain *domain)
1687{
1688 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1689}
1690
1691static inline int guestwidth_to_adjustwidth(int gaw)
1692{
1693 int agaw;
1694 int r = (gaw - 12) % 9;
1695
1696 if (r == 0)
1697 agaw = gaw;
1698 else
1699 agaw = gaw + 9 - r;
1700 if (agaw > 64)
1701 agaw = 64;
1702 return agaw;
1703}
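/*
 * Worked example for the adjustment above: VT-d page tables cover 9 bits of
 * address per level on top of the 12-bit page offset, so the guest width is
 * rounded up to 12 + 9 * n.  A guest width of 40 gives
 * r = (40 - 12) % 9 = 1 and agaw = 40 + 9 - 1 = 48, while 48 already has
 * r = 0 and is returned unchanged; anything above 64 is clamped to 64.
 */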
1704
1705static int domain_init(struct dmar_domain *domain, int guest_width)
1706{
1707 struct intel_iommu *iommu;
1708 int adjust_width, agaw;
1709 unsigned long sagaw;
1710
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001711 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1712 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001713 domain_reserve_special_ranges(domain);
1714
1715 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001716 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717 if (guest_width > cap_mgaw(iommu->cap))
1718 guest_width = cap_mgaw(iommu->cap);
1719 domain->gaw = guest_width;
1720 adjust_width = guestwidth_to_adjustwidth(guest_width);
1721 agaw = width_to_agaw(adjust_width);
1722 sagaw = cap_sagaw(iommu->cap);
1723 if (!test_bit(agaw, &sagaw)) {
1724 /* hardware doesn't support it, choose a bigger one */
1725 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1726 agaw = find_next_bit(&sagaw, 5, agaw);
1727 if (agaw >= 5)
1728 return -ENODEV;
1729 }
1730 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001731
Weidong Han8e6040972008-12-08 15:49:06 +08001732 if (ecap_coherent(iommu->ecap))
1733 domain->iommu_coherency = 1;
1734 else
1735 domain->iommu_coherency = 0;
1736
Sheng Yang58c610b2009-03-18 15:33:05 +08001737 if (ecap_sc_support(iommu->ecap))
1738 domain->iommu_snooping = 1;
1739 else
1740 domain->iommu_snooping = 0;
1741
David Woodhouse214e39a2014-03-19 10:38:49 +00001742 if (intel_iommu_superpage)
1743 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1744 else
1745 domain->iommu_superpage = 0;
1746
Suresh Siddha4c923d42009-10-02 11:01:24 -07001747 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001748
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001749 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001750 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001751 if (!domain->pgd)
1752 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001753 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754 return 0;
1755}
1756
1757static void domain_exit(struct dmar_domain *domain)
1758{
David Woodhouseea8ea462014-03-05 17:09:32 +00001759 struct page *freelist = NULL;
Alex Williamson71684402015-03-04 11:30:10 -07001760 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001761
1762	/* Domain 0 is reserved, so don't process it */
1763 if (!domain)
1764 return;
1765
Alex Williamson7b668352011-05-24 12:02:41 +01001766 /* Flush any lazy unmaps that may reference this domain */
1767 if (!intel_iommu_strict)
1768 flush_unmaps_timeout(0);
1769
Jiang Liu92d03cc2014-02-19 14:07:28 +08001770 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001771 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001772
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001773 /* destroy iovas */
1774 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001775
David Woodhouseea8ea462014-03-05 17:09:32 +00001776 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001777
Jiang Liu92d03cc2014-02-19 14:07:28 +08001778 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001779 rcu_read_lock();
Alex Williamson71684402015-03-04 11:30:10 -07001780 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1781 iommu_detach_domain(domain, g_iommus[i]);
Jiang Liu0e242612014-02-19 14:07:34 +08001782 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001783
David Woodhouseea8ea462014-03-05 17:09:32 +00001784 dma_free_pagelist(freelist);
1785
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001786 free_domain_mem(domain);
1787}
1788
David Woodhouse64ae8922014-03-09 12:52:30 -07001789static int domain_context_mapping_one(struct dmar_domain *domain,
1790 struct intel_iommu *iommu,
1791 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792{
1793 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001794 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001795 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001796 int id;
1797 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001798 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799
1800 pr_debug("Set context mapping for %02x:%02x.%d\n",
1801 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001802
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001803 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001804 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1805 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001806
David Woodhouse03ecc322015-02-13 14:35:21 +00001807 spin_lock_irqsave(&iommu->lock, flags);
1808 context = iommu_context_addr(iommu, bus, devfn, 1);
1809 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001810 if (!context)
1811 return -ENOMEM;
1812 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001813 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001814 spin_unlock_irqrestore(&iommu->lock, flags);
1815 return 0;
1816 }
1817
Weidong Hanea6606b2008-12-08 23:08:15 +08001818 id = domain->id;
1819 pgd = domain->pgd;
1820
Jiang Liuab8dfe22014-07-11 14:19:27 +08001821 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001822 if (domain_type_is_vm(domain)) {
1823 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001824 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001825 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001826 pr_err("IOMMU: no free domain ids\n");
Weidong Hanea6606b2008-12-08 23:08:15 +08001827 return -EFAULT;
1828 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001829 }
1830
1831 /* Skip top levels of page tables for
1832		 * an iommu which has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001833 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001834 */
Chris Wright1672af12009-12-02 12:06:34 -08001835 if (translation != CONTEXT_TT_PASS_THROUGH) {
1836 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1837 pgd = phys_to_virt(dma_pte_addr(pgd));
1838 if (!dma_pte_present(pgd)) {
1839 spin_unlock_irqrestore(&iommu->lock, flags);
1840 return -ENOMEM;
1841 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001842 }
1843 }
1844 }
1845
1846 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001847
Yu Zhao93a23a72009-05-18 13:51:37 +08001848 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001849 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001850 translation = info ? CONTEXT_TT_DEV_IOTLB :
1851 CONTEXT_TT_MULTI_LEVEL;
1852 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001853 /*
1854 * In pass through mode, AW must be programmed to indicate the largest
1855 * AGAW value supported by hardware. And ASR is ignored by hardware.
1856 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001857 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001858 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001859 else {
1860 context_set_address_root(context, virt_to_phys(pgd));
1861 context_set_address_width(context, iommu->agaw);
1862 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001863
1864 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001865 context_set_fault_enable(context);
1866 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001867 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001868
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001869 /*
1870 * It's a non-present to present mapping. If hardware doesn't cache
1871	 * non-present entries we only need to flush the write-buffer. If it
1872	 * _does_ cache non-present entries, then it does so in the special
1873 * domain #0, which we have to flush:
1874 */
1875 if (cap_caching_mode(iommu->cap)) {
1876 iommu->flush.flush_context(iommu, 0,
1877 (((u16)bus) << 8) | devfn,
1878 DMA_CCMD_MASK_NOBIT,
1879 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001880 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001881 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001882 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001883 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001884 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001886
Jiang Liufb170fb2014-07-11 14:19:28 +08001887 domain_attach_iommu(domain, iommu);
1888
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001889 return 0;
1890}
1891
Alex Williamson579305f2014-07-03 09:51:43 -06001892struct domain_context_mapping_data {
1893 struct dmar_domain *domain;
1894 struct intel_iommu *iommu;
1895 int translation;
1896};
1897
1898static int domain_context_mapping_cb(struct pci_dev *pdev,
1899 u16 alias, void *opaque)
1900{
1901 struct domain_context_mapping_data *data = opaque;
1902
1903 return domain_context_mapping_one(data->domain, data->iommu,
1904 PCI_BUS_NUM(alias), alias & 0xff,
1905 data->translation);
1906}
1907
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001908static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001909domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1910 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001911{
David Woodhouse64ae8922014-03-09 12:52:30 -07001912 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001913 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001914 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001915
David Woodhousee1f167f2014-03-09 15:24:46 -07001916 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001917 if (!iommu)
1918 return -ENODEV;
1919
Alex Williamson579305f2014-07-03 09:51:43 -06001920 if (!dev_is_pci(dev))
1921 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001922 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001923
1924 data.domain = domain;
1925 data.iommu = iommu;
1926 data.translation = translation;
1927
1928 return pci_for_each_dma_alias(to_pci_dev(dev),
1929 &domain_context_mapping_cb, &data);
1930}
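/*
 * A note on the helper above: pci_for_each_dma_alias() invokes the callback
 * for the device itself and for every alias it can take on the path to the
 * IOMMU (e.g. when it sits behind a PCIe-to-PCI bridge), so a context entry
 * is installed for every bus/devfn the hardware may present as the
 * requester id of this device.
 */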
1931
1932static int domain_context_mapped_cb(struct pci_dev *pdev,
1933 u16 alias, void *opaque)
1934{
1935 struct intel_iommu *iommu = opaque;
1936
1937 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001938}
1939
David Woodhousee1f167f2014-03-09 15:24:46 -07001940static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001941{
Weidong Han5331fe62008-12-08 23:00:00 +08001942 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001943 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001944
David Woodhousee1f167f2014-03-09 15:24:46 -07001945 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001946 if (!iommu)
1947 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001948
Alex Williamson579305f2014-07-03 09:51:43 -06001949 if (!dev_is_pci(dev))
1950 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001951
Alex Williamson579305f2014-07-03 09:51:43 -06001952 return !pci_for_each_dma_alias(to_pci_dev(dev),
1953 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954}
1955
Fenghua Yuf5329592009-08-04 15:09:37 -07001956/* Returns the number of VT-d pages, aligned up to the MM page size */
1957static inline unsigned long aligned_nrpages(unsigned long host_addr,
1958 size_t size)
1959{
1960 host_addr &= ~PAGE_MASK;
1961 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1962}
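/*
 * Example for the helper above (assuming 4KiB MM pages): a buffer at
 * host_addr 0x1234 with size 0x2000 keeps offset 0x234 within its first
 * page, so PAGE_ALIGN(0x234 + 0x2000) = 0x3000 and the mapping needs three
 * VT-d pages even though the raw size is only two pages long.
 */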
1963
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001964/* Return largest possible superpage level for a given mapping */
1965static inline int hardware_largepage_caps(struct dmar_domain *domain,
1966 unsigned long iov_pfn,
1967 unsigned long phy_pfn,
1968 unsigned long pages)
1969{
1970 int support, level = 1;
1971 unsigned long pfnmerge;
1972
1973 support = domain->iommu_superpage;
1974
1975 /* To use a large page, the virtual *and* physical addresses
1976 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1977 of them will mean we have to use smaller pages. So just
1978 merge them and check both at once. */
1979 pfnmerge = iov_pfn | phy_pfn;
1980
1981 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1982 pages >>= VTD_STRIDE_SHIFT;
1983 if (!pages)
1984 break;
1985 pfnmerge >>= VTD_STRIDE_SHIFT;
1986 level++;
1987 support--;
1988 }
1989 return level;
1990}
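/*
 * Illustration of the level calculation above (assuming the usual 9-bit
 * VT-d stride): level 1 is a 4KiB PTE, level 2 a 2MiB superpage and level 3
 * a 1GiB superpage.  A request for 512 pages whose iov_pfn and phy_pfn are
 * both 512-page aligned survives one iteration of the loop and returns
 * level 2; any misalignment in either pfn, or a shorter run of pages,
 * drops back to level 1.
 */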
1991
David Woodhouse9051aa02009-06-29 12:30:54 +01001992static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1993 struct scatterlist *sg, unsigned long phys_pfn,
1994 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001995{
1996 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001997 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08001998 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001999 unsigned int largepage_lvl = 0;
2000 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002001
Jiang Liu162d1b12014-07-11 14:19:35 +08002002 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002003
2004 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2005 return -EINVAL;
2006
2007 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2008
Jiang Liucc4f14a2014-11-26 09:42:10 +08002009 if (!sg) {
2010 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002011 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2012 }
2013
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002014 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002015 uint64_t tmp;
2016
David Woodhousee1605492009-06-29 11:17:38 +01002017 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002018 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002019 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2020 sg->dma_length = sg->length;
2021 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002022 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002023 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002024
David Woodhousee1605492009-06-29 11:17:38 +01002025 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002026 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2027
David Woodhouse5cf0a762014-03-19 16:07:49 +00002028 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002029 if (!pte)
2030 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002031			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002032 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002033 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002034 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2035 /*
2036 * Ensure that old small page tables are
2037 * removed to make room for superpage,
2038 * if they exist.
2039 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002040 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002041 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002042 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002043 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002044 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002045
David Woodhousee1605492009-06-29 11:17:38 +01002046 }
2047		/* We don't need a lock here; nobody else
2048		 * touches the iova range
2049 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002050 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002051 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002052 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002053 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2054 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002055 if (dumps) {
2056 dumps--;
2057 debug_dma_dump_mappings(NULL);
2058 }
2059 WARN_ON(1);
2060 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002061
2062 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2063
2064 BUG_ON(nr_pages < lvl_pages);
2065 BUG_ON(sg_res < lvl_pages);
2066
2067 nr_pages -= lvl_pages;
2068 iov_pfn += lvl_pages;
2069 phys_pfn += lvl_pages;
2070 pteval += lvl_pages * VTD_PAGE_SIZE;
2071 sg_res -= lvl_pages;
2072
2073 /* If the next PTE would be the first in a new page, then we
2074 need to flush the cache on the entries we've just written.
2075 And then we'll need to recalculate 'pte', so clear it and
2076 let it get set again in the if (!pte) block above.
2077
2078 If we're done (!nr_pages) we need to flush the cache too.
2079
2080 Also if we've been setting superpages, we may need to
2081 recalculate 'pte' and switch back to smaller pages for the
2082 end of the mapping, if the trailing size is not enough to
2083 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002084 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002085 if (!nr_pages || first_pte_in_page(pte) ||
2086 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002087 domain_flush_cache(domain, first_pte,
2088 (void *)pte - (void *)first_pte);
2089 pte = NULL;
2090 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002091
2092 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002093 sg = sg_next(sg);
2094 }
2095 return 0;
2096}
2097
David Woodhouse9051aa02009-06-29 12:30:54 +01002098static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2099 struct scatterlist *sg, unsigned long nr_pages,
2100 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002101{
David Woodhouse9051aa02009-06-29 12:30:54 +01002102 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2103}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002104
David Woodhouse9051aa02009-06-29 12:30:54 +01002105static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2106 unsigned long phys_pfn, unsigned long nr_pages,
2107 int prot)
2108{
2109 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002110}
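/*
 * The two thin wrappers above select the input form for __domain_mapping():
 * domain_sg_mapping() walks a scatterlist and takes the physical addresses
 * from each sg entry, while domain_pfn_mapping() maps a single physically
 * contiguous range starting at phys_pfn.  Both take nr_pages in VT-d (4KiB)
 * pages and the same DMA_PTE_READ/DMA_PTE_WRITE protection bits.
 */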
2111
Weidong Hanc7151a82008-12-08 22:51:37 +08002112static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113{
Weidong Hanc7151a82008-12-08 22:51:37 +08002114 if (!iommu)
2115 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002116
2117 clear_context_table(iommu, bus, devfn);
2118 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002119 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002120 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002121}
2122
David Woodhouse109b9b02012-05-25 17:43:02 +01002123static inline void unlink_domain_info(struct device_domain_info *info)
2124{
2125 assert_spin_locked(&device_domain_lock);
2126 list_del(&info->link);
2127 list_del(&info->global);
2128 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002129 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002130}
2131
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002132static void domain_remove_dev_info(struct dmar_domain *domain)
2133{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002134 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002135 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136
2137 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002138 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002139 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002140 spin_unlock_irqrestore(&device_domain_lock, flags);
2141
Yu Zhao93a23a72009-05-18 13:51:37 +08002142 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002143 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002144
Jiang Liuab8dfe22014-07-11 14:19:27 +08002145 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002146 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002147 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002148 }
2149
2150 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151 spin_lock_irqsave(&device_domain_lock, flags);
2152 }
2153 spin_unlock_irqrestore(&device_domain_lock, flags);
2154}
2155
2156/*
2157 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002158 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002159 */
David Woodhouse1525a292014-03-06 16:19:30 +00002160static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002161{
2162 struct device_domain_info *info;
2163
2164 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002165 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002166 if (info)
2167 return info->domain;
2168 return NULL;
2169}
2170
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002171static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002172dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2173{
2174 struct device_domain_info *info;
2175
2176 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002177 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002178 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002179 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002180
2181 return NULL;
2182}
2183
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002184static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002185 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002186 struct device *dev,
2187 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002188{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002189 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002190 struct device_domain_info *info;
2191 unsigned long flags;
2192
2193 info = alloc_devinfo_mem();
2194 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002195 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002196
Jiang Liu745f2582014-02-19 14:07:26 +08002197 info->bus = bus;
2198 info->devfn = devfn;
2199 info->dev = dev;
2200 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002201 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002202
2203 spin_lock_irqsave(&device_domain_lock, flags);
2204 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002205 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002206 else {
2207 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002208 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002209 if (info2)
2210 found = info2->domain;
2211 }
Jiang Liu745f2582014-02-19 14:07:26 +08002212 if (found) {
2213 spin_unlock_irqrestore(&device_domain_lock, flags);
2214 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002215 /* Caller must free the original domain */
2216 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002217 }
2218
David Woodhouseb718cd32014-03-09 13:11:33 -07002219 list_add(&info->link, &domain->devices);
2220 list_add(&info->global, &device_domain_list);
2221 if (dev)
2222 dev->archdata.iommu = info;
2223 spin_unlock_irqrestore(&device_domain_lock, flags);
2224
2225 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002226}
2227
Alex Williamson579305f2014-07-03 09:51:43 -06002228static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2229{
2230 *(u16 *)opaque = alias;
2231 return 0;
2232}
2233
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002234/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002235static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002236{
Alex Williamson579305f2014-07-03 09:51:43 -06002237 struct dmar_domain *domain, *tmp;
2238 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002239 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002240 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002241 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002242 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002243
David Woodhouse146922e2014-03-09 15:44:17 -07002244 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002245 if (domain)
2246 return domain;
2247
David Woodhouse146922e2014-03-09 15:44:17 -07002248 iommu = device_to_iommu(dev, &bus, &devfn);
2249 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002250 return NULL;
2251
2252 if (dev_is_pci(dev)) {
2253 struct pci_dev *pdev = to_pci_dev(dev);
2254
2255 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2256
2257 spin_lock_irqsave(&device_domain_lock, flags);
2258 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2259 PCI_BUS_NUM(dma_alias),
2260 dma_alias & 0xff);
2261 if (info) {
2262 iommu = info->iommu;
2263 domain = info->domain;
2264 }
2265 spin_unlock_irqrestore(&device_domain_lock, flags);
2266
2267		/* The DMA alias already has a domain; use it */
2268 if (info)
2269 goto found_domain;
2270 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002271
David Woodhouse146922e2014-03-09 15:44:17 -07002272 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002273 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002274 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002275 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002276 domain->id = iommu_attach_domain(domain, iommu);
2277 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002278 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002279 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002280 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002281 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002282 if (domain_init(domain, gaw)) {
2283 domain_exit(domain);
2284 return NULL;
2285 }
2286
2287 /* register PCI DMA alias device */
2288 if (dev_is_pci(dev)) {
2289 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2290 dma_alias & 0xff, NULL, domain);
2291
2292 if (!tmp || tmp != domain) {
2293 domain_exit(domain);
2294 domain = tmp;
2295 }
2296
David Woodhouseb718cd32014-03-09 13:11:33 -07002297 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002298 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002299 }
2300
2301found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002302 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2303
2304 if (!tmp || tmp != domain) {
2305 domain_exit(domain);
2306 domain = tmp;
2307 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002308
2309 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002310}
2311
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002312static int iommu_identity_mapping;
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002313#define IDENTMAP_ALL 1
2314#define IDENTMAP_GFX 2
2315#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002316
David Woodhouseb2132032009-06-26 18:50:28 +01002317static int iommu_domain_identity_map(struct dmar_domain *domain,
2318 unsigned long long start,
2319 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002320{
David Woodhousec5395d52009-06-28 16:35:56 +01002321 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2322 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002323
David Woodhousec5395d52009-06-28 16:35:56 +01002324 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2325 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002326 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002327 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002328 }
2329
David Woodhousec5395d52009-06-28 16:35:56 +01002330 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2331 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002332 /*
2333	 * The RMRR range might overlap with a physical memory range,
2334	 * so clear it first
2335 */
David Woodhousec5395d52009-06-28 16:35:56 +01002336 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002337
David Woodhousec5395d52009-06-28 16:35:56 +01002338 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2339 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002340 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002341}
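/*
 * A note on the helper above: the range is mapped 1:1, i.e. the PTEs are
 * created with iov_pfn == phys_pfn, so DMA addresses equal physical
 * addresses for the covered region.  Any PTEs already present are cleared
 * first because an RMRR may overlap ordinary RAM that was mapped earlier.
 */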
2342
David Woodhouse0b9d9752014-03-09 15:48:15 -07002343static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002344 unsigned long long start,
2345 unsigned long long end)
2346{
2347 struct dmar_domain *domain;
2348 int ret;
2349
David Woodhouse0b9d9752014-03-09 15:48:15 -07002350 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002351 if (!domain)
2352 return -ENOMEM;
2353
David Woodhouse19943b02009-08-04 16:19:20 +01002354 /* For _hardware_ passthrough, don't bother. But for software
2355 passthrough, we do it anyway -- it may indicate a memory
2356	   range which is reserved in E820, and so didn't get set
2357	   up in si_domain to start with */
2358 if (domain == si_domain && hw_pass_through) {
2359 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002360 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002361 return 0;
2362 }
2363
2364 printk(KERN_INFO
2365 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002366 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002367
David Woodhouse5595b522009-12-02 09:21:55 +00002368 if (end < start) {
2369 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2370 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2371 dmi_get_system_info(DMI_BIOS_VENDOR),
2372 dmi_get_system_info(DMI_BIOS_VERSION),
2373 dmi_get_system_info(DMI_PRODUCT_VERSION));
2374 ret = -EIO;
2375 goto error;
2376 }
2377
David Woodhouse2ff729f2009-08-26 14:25:41 +01002378 if (end >> agaw_to_width(domain->agaw)) {
2379 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2380 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2381 agaw_to_width(domain->agaw),
2382 dmi_get_system_info(DMI_BIOS_VENDOR),
2383 dmi_get_system_info(DMI_BIOS_VERSION),
2384 dmi_get_system_info(DMI_PRODUCT_VERSION));
2385 ret = -EIO;
2386 goto error;
2387 }
David Woodhouse19943b02009-08-04 16:19:20 +01002388
David Woodhouseb2132032009-06-26 18:50:28 +01002389 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002390 if (ret)
2391 goto error;
2392
2393 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002394 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002395 if (ret)
2396 goto error;
2397
2398 return 0;
2399
2400 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002401 domain_exit(domain);
2402 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002403}
2404
2405static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002406 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002407{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002408 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002409 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002410 return iommu_prepare_identity_map(dev, rmrr->base_address,
2411 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002412}
2413
Suresh Siddhad3f13812011-08-23 17:05:25 -07002414#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002415static inline void iommu_prepare_isa(void)
2416{
2417 struct pci_dev *pdev;
2418 int ret;
2419
2420 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2421 if (!pdev)
2422 return;
2423
David Woodhousec7ab48d2009-06-26 19:10:36 +01002424 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002425 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002426
2427 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002428 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2429 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002430
Yijing Wang9b27e822014-05-20 20:37:52 +08002431 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002432}
2433#else
2434static inline void iommu_prepare_isa(void)
2435{
2436 return;
2437}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002438#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002439
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002440static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002441
Matt Kraai071e1372009-08-23 22:30:22 -07002442static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002443{
2444 struct dmar_drhd_unit *drhd;
2445 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002446 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002447 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002448
Jiang Liuab8dfe22014-07-11 14:19:27 +08002449 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002450 if (!si_domain)
2451 return -EFAULT;
2452
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002453 for_each_active_iommu(iommu, drhd) {
2454 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002455 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002456 domain_exit(si_domain);
2457 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002458 } else if (first) {
2459 si_domain->id = ret;
2460 first = false;
2461 } else if (si_domain->id != ret) {
2462 domain_exit(si_domain);
2463 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002464 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002465 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002466 }
2467
2468 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2469 domain_exit(si_domain);
2470 return -EFAULT;
2471 }
2472
Jiang Liu9544c002014-01-06 14:18:13 +08002473 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2474 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002475
David Woodhouse19943b02009-08-04 16:19:20 +01002476 if (hw)
2477 return 0;
2478
David Woodhousec7ab48d2009-06-26 19:10:36 +01002479 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002480 unsigned long start_pfn, end_pfn;
2481 int i;
2482
2483 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2484 ret = iommu_domain_identity_map(si_domain,
2485 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2486 if (ret)
2487 return ret;
2488 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002489 }
2490
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002491 return 0;
2492}
2493
David Woodhouse9b226622014-03-09 14:03:28 -07002494static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495{
2496 struct device_domain_info *info;
2497
2498 if (likely(!iommu_identity_mapping))
2499 return 0;
2500
David Woodhouse9b226622014-03-09 14:03:28 -07002501 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002502 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2503 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002504
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002505 return 0;
2506}
2507
2508static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002509 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002510{
David Woodhouse0ac72662014-03-09 13:19:22 -07002511 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002512 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002513 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002514 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002515
David Woodhouse5913c9b2014-03-09 16:27:31 -07002516 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002517 if (!iommu)
2518 return -ENODEV;
2519
David Woodhouse5913c9b2014-03-09 16:27:31 -07002520 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002521 if (ndomain != domain)
2522 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002523
David Woodhouse5913c9b2014-03-09 16:27:31 -07002524 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002525 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002526 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002527 return ret;
2528 }
2529
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002530 return 0;
2531}
2532
David Woodhouse0b9d9752014-03-09 15:48:15 -07002533static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002534{
2535 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002536 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002537 int i;
2538
Jiang Liu0e242612014-02-19 14:07:34 +08002539 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002540 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002541 /*
2542 * Return TRUE if this RMRR contains the device that
2543 * is passed in.
2544 */
2545 for_each_active_dev_scope(rmrr->devices,
2546 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002547 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002548 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002549 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002550 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002551 }
Jiang Liu0e242612014-02-19 14:07:34 +08002552 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002553 return false;
2554}
2555
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002556/*
2557 * There are a couple cases where we need to restrict the functionality of
2558 * devices associated with RMRRs. The first is when evaluating a device for
2559 * identity mapping because problems exist when devices are moved in and out
2560 * of domains and their respective RMRR information is lost. This means that
2561 * a device with associated RMRRs will never be in a "passthrough" domain.
2562 * The second is use of the device through the IOMMU API. This interface
2563 * expects to have full control of the IOVA space for the device. We cannot
2564 * satisfy both the requirement that RMRR access is maintained and have an
2565 * unencumbered IOVA space. We also have no ability to quiesce the device's
2566 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2567 * We therefore prevent devices associated with an RMRR from participating in
2568 * the IOMMU API, which eliminates them from device assignment.
2569 *
2570 * In both cases we assume that PCI USB devices with RMRRs have them largely
2571 * for historical reasons and that the RMRR space is not actively used post
2572 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002573 *
2574 * The same exception is made for graphics devices, with the requirement that
2575 * any use of the RMRR regions will be torn down before assigning the device
2576 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002577 */
2578static bool device_is_rmrr_locked(struct device *dev)
2579{
2580 if (!device_has_rmrr(dev))
2581 return false;
2582
2583 if (dev_is_pci(dev)) {
2584 struct pci_dev *pdev = to_pci_dev(dev);
2585
David Woodhouse18436af2015-03-25 15:05:47 +00002586 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002587 return false;
2588 }
2589
2590 return true;
2591}
2592
David Woodhouse3bdb2592014-03-09 16:03:08 -07002593static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002594{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002595
David Woodhouse3bdb2592014-03-09 16:03:08 -07002596 if (dev_is_pci(dev)) {
2597 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002598
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002599 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002600 return 0;
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002601
David Woodhouse3bdb2592014-03-09 16:03:08 -07002602 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2603 return 1;
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002604
David Woodhouse3bdb2592014-03-09 16:03:08 -07002605 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2606 return 1;
2607
2608 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2609 return 0;
2610
2611 /*
2612 * We want to start off with all devices in the 1:1 domain, and
2613 * take them out later if we find they can't access all of memory.
2614 *
2615 * However, we can't do this for PCI devices behind bridges,
2616 * because all PCI devices behind the same bridge will end up
2617 * with the same source-id on their transactions.
2618 *
2619 * Practically speaking, we can't change things around for these
2620 * devices at run-time, because we can't be sure there'll be no
2621 * DMA transactions in flight for any of their siblings.
2622 *
2623 * So PCI devices (unless they're on the root bus) as well as
2624 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2625 * the 1:1 domain, just in _case_ one of their siblings turns out
2626 * not to be able to map all of memory.
2627 */
2628 if (!pci_is_pcie(pdev)) {
2629 if (!pci_is_root_bus(pdev->bus))
2630 return 0;
2631 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2632 return 0;
2633 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2634 return 0;
2635 } else {
2636 if (device_has_rmrr(dev))
2637 return 0;
2638 }
David Woodhouse6941af22009-07-04 18:24:27 +01002639
David Woodhouse3dfc8132009-07-04 19:11:08 +01002640 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002641 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002642 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002643 * take them out of the 1:1 domain later.
2644 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002645 if (!startup) {
2646 /*
2647 * If the device's dma_mask is less than the system's memory
2648 * size then this is not a candidate for identity mapping.
2649 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002650 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002651
David Woodhouse3bdb2592014-03-09 16:03:08 -07002652 if (dev->coherent_dma_mask &&
2653 dev->coherent_dma_mask < dma_mask)
2654 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002655
David Woodhouse3bdb2592014-03-09 16:03:08 -07002656 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002657 }
David Woodhouse6941af22009-07-04 18:24:27 +01002658
2659 return 1;
2660}
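/*
 * Illustrative example (not from the original source): on a machine whose
 * RAM extends above 4GiB, dma_get_required_mask() reports more than
 * DMA_BIT_MASK(32).  A device that only advertises a 32-bit dma_mask then
 * fails the check above once !startup, so it is kept out of (or later
 * dropped from) the identity domain, while a 64-bit capable device passes
 * the check and stays 1:1 mapped.
 */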
2661
David Woodhousecf04eee2014-03-21 16:49:04 +00002662static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2663{
2664 int ret;
2665
2666 if (!iommu_should_identity_map(dev, 1))
2667 return 0;
2668
2669 ret = domain_add_dev_info(si_domain, dev,
2670 hw ? CONTEXT_TT_PASS_THROUGH :
2671 CONTEXT_TT_MULTI_LEVEL);
2672 if (!ret)
2673 pr_info("IOMMU: %s identity mapping for device %s\n",
2674 hw ? "hardware" : "software", dev_name(dev));
2675 else if (ret == -ENODEV)
2676 /* device not associated with an iommu */
2677 ret = 0;
2678
2679 return ret;
2680}
2681
2682
Matt Kraai071e1372009-08-23 22:30:22 -07002683static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002684{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002685 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002686 struct dmar_drhd_unit *drhd;
2687 struct intel_iommu *iommu;
2688 struct device *dev;
2689 int i;
2690 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002691
David Woodhouse19943b02009-08-04 16:19:20 +01002692 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002693 if (ret)
2694 return -EFAULT;
2695
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002696 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002697 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2698 if (ret)
2699 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002700 }
2701
David Woodhousecf04eee2014-03-21 16:49:04 +00002702 for_each_active_iommu(iommu, drhd)
2703 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2704 struct acpi_device_physical_node *pn;
2705 struct acpi_device *adev;
2706
2707 if (dev->bus != &acpi_bus_type)
2708 continue;
2709
2710 adev = to_acpi_device(dev);
2711 mutex_lock(&adev->physical_node_lock);
2712 list_for_each_entry(pn, &adev->physical_node_list, node) {
2713 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2714 if (ret)
2715 break;
2716 }
2717 mutex_unlock(&adev->physical_node_lock);
2718 if (ret)
2719 return ret;
2720 }
2721
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002722 return 0;
2723}
2724
Jiang Liuffebeb42014-11-09 22:48:02 +08002725static void intel_iommu_init_qi(struct intel_iommu *iommu)
2726{
2727 /*
2728 * Start from a sane iommu hardware state.
2729 * If the queued invalidation is already initialized by us
2730 * (for example, while enabling interrupt-remapping) then
2731 * we got the things already rolling from a sane state.
2732 */
2733 if (!iommu->qi) {
2734 /*
2735 * Clear any previous faults.
2736 */
2737 dmar_fault(-1, iommu);
2738 /*
2739 * Disable queued invalidation if supported and already enabled
2740 * before OS handover.
2741 */
2742 dmar_disable_qi(iommu);
2743 }
2744
2745 if (dmar_enable_qi(iommu)) {
2746 /*
2747 * Queued Invalidate not enabled, use Register Based Invalidate
2748 */
2749 iommu->flush.flush_context = __iommu_flush_context;
2750 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2751 pr_info("IOMMU: %s using Register based invalidation\n",
2752 iommu->name);
2753 } else {
2754 iommu->flush.flush_context = qi_flush_context;
2755 iommu->flush.flush_iotlb = qi_flush_iotlb;
2756 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2757 }
2758}
2759
Joseph Cihulab7792602011-05-03 00:08:37 -07002760static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761{
2762 struct dmar_drhd_unit *drhd;
2763 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002764 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002765 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002766 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002767
2768 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002769 * for each drhd
2770 * allocate root
2771 * initialize and program root entry to not present
2772 * endfor
2773 */
2774 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002775 /*
2776 * lock not needed as this is only incremented in the single-
2777 * threaded kernel __init code path; all other accesses are
2778 * read-only
2779 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002780 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002781 g_num_of_iommus++;
2782 continue;
2783 }
2784 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002785 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002786 }
2787
Jiang Liuffebeb42014-11-09 22:48:02 +08002788 /* Preallocate enough resources for IOMMU hot-addition */
2789 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2790 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2791
Weidong Hand9630fe2008-12-08 11:06:32 +08002792 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2793 GFP_KERNEL);
2794 if (!g_iommus) {
2795 printk(KERN_ERR "Allocating global iommu array failed\n");
2796 ret = -ENOMEM;
2797 goto error;
2798 }
2799
mark gross80b20dd2008-04-18 13:53:58 -07002800 deferred_flush = kzalloc(g_num_of_iommus *
2801 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2802 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002803 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002804 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002805 }
2806
Jiang Liu7c919772014-01-06 14:18:18 +08002807 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002808 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002809
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002810 ret = iommu_init_domains(iommu);
2811 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002812 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002813
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002814 /*
2815 * TBD:
2816 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002817 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002818 */
2819 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002820 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002821 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002822 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002823 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002824 }
2825
Jiang Liuffebeb42014-11-09 22:48:02 +08002826 for_each_active_iommu(iommu, drhd)
2827 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002828
David Woodhouse19943b02009-08-04 16:19:20 +01002829 if (iommu_pass_through)
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002830 iommu_identity_mapping |= IDENTMAP_ALL;
2831
Suresh Siddhad3f13812011-08-23 17:05:25 -07002832#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002833 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002834#endif
David Woodhousee0fc7e0b2009-09-30 09:12:17 -07002835
2836 check_tylersburg_isoch();
2837
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002838 /*
2839 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002840 * identity mappings for rmrr, gfx, and isa and may fall back to static
2841 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002842 */
David Woodhouse19943b02009-08-04 16:19:20 +01002843 if (iommu_identity_mapping) {
2844 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2845 if (ret) {
2846 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002847 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002848 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002849 }
David Woodhouse19943b02009-08-04 16:19:20 +01002850 /*
2851 * For each rmrr
2852 * for each dev attached to rmrr
2853 * do
2854 * locate drhd for dev, alloc domain for dev
2855 * allocate free domain
2856 * allocate page table entries for rmrr
2857 * if context not allocated for bus
2858 * allocate and init context
2859 * set present in root table for this bus
2860 * init context with domain, translation etc
2861 * endfor
2862 * endfor
2863 */
2864 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2865 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002866 /* some BIOSes list non-existent devices in the DMAR table. */
2867 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002868 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002869 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002870 if (ret)
2871 printk(KERN_ERR
2872 "IOMMU: mapping reserved region failed\n");
2873 }
2874 }
2875
2876 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002877
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002878 /*
2879 * for each drhd
2880 * enable fault log
2881 * global invalidate context cache
2882 * global invalidate iotlb
2883 * enable translation
2884 */
Jiang Liu7c919772014-01-06 14:18:18 +08002885 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002886 if (drhd->ignored) {
2887 /*
2888 * we always have to disable PMRs or DMA may fail on
2889 * this device
2890 */
2891 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002892 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002893 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002894 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002895
2896 iommu_flush_write_buffer(iommu);
2897
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002898 ret = dmar_set_interrupt(iommu);
2899 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002900 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002901
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002902 iommu_set_root_entry(iommu);
2903
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002904 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002905 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002906 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002907 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002908 }
2909
2910 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002911
2912free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002913 for_each_active_iommu(iommu, drhd) {
2914 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002915 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002916 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002917 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002918free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002919 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002920error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002921 return ret;
2922}
2923
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002924/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002925static struct iova *intel_alloc_iova(struct device *dev,
2926 struct dmar_domain *domain,
2927 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002928{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002929 struct iova *iova = NULL;
2930
David Woodhouse875764d2009-06-28 21:20:51 +01002931 /* Restrict dma_mask to the width that the iommu can handle */
2932 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2933
2934 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002935 /*
2936 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002937 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002938 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002939 */
David Woodhouse875764d2009-06-28 21:20:51 +01002940 iova = alloc_iova(&domain->iovad, nrpages,
2941 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2942 if (iova)
2943 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002944 }
David Woodhouse875764d2009-06-28 21:20:51 +01002945 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2946 if (unlikely(!iova)) {
2947 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002948 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002949 return NULL;
2950 }
2951
2952 return iova;
2953}
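/*
 * Sketch of the policy above (illustrative numbers): a device advertising
 * a 64-bit dma_mask that asks for, say, 16 pages first tries
 * alloc_iova(&domain->iovad, 16, IOVA_PFN(DMA_BIT_MASK(32)), 1), i.e. an
 * IOVA below 4GiB; only if that range is exhausted does it retry with the
 * full IOVA_PFN(dma_mask) limit.  When dmar_forcedac is set, the low
 * allocation attempt is skipped entirely.
 */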
2954
David Woodhoused4b709f2014-03-09 16:07:40 -07002955static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956{
2957 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002958 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959
David Woodhoused4b709f2014-03-09 16:07:40 -07002960 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002961 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002962 printk(KERN_ERR "Allocating domain for %s failed\n",
2963 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002964 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965 }
2966
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002968 if (unlikely(!domain_context_mapped(dev))) {
2969 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002970 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002971 printk(KERN_ERR "Domain context map for %s failed\n",
2972 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002973 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002974 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002975 }
2976
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002977 return domain;
2978}
2979
David Woodhoused4b709f2014-03-09 16:07:40 -07002980static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002981{
2982 struct device_domain_info *info;
2983
2984 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002985 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002986 if (likely(info))
2987 return info->domain;
2988
2989 return __get_valid_domain_for_dev(dev);
2990}
2991
David Woodhouseecb509e2014-03-09 16:29:55 -07002992/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002993static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002994{
2995 int found;
2996
David Woodhouse3d891942014-03-06 15:59:26 +00002997 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002998 return 1;
2999
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003000 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003001 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003002
David Woodhouse9b226622014-03-09 14:03:28 -07003003 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003004 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003005 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003006 return 1;
3007 else {
3008 /*
3009 * 32 bit DMA is removed from si_domain and fall back
3010 * to non-identity mapping.
3011 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003012 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003013 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003014 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003015 return 0;
3016 }
3017 } else {
3018 /*
3019 * In case of a detached 64 bit DMA device from vm, the device
3020 * is put into si_domain for identity mapping.
3021 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003022 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003023 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003024 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003025 hw_pass_through ?
3026 CONTEXT_TT_PASS_THROUGH :
3027 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003028 if (!ret) {
3029 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003030 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003031 return 1;
3032 }
3033 }
3034 }
3035
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003036 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003037}
3038
David Woodhouse5040a912014-03-09 16:14:00 -07003039static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003040 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003041{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003042 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003043 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003044 struct iova *iova;
3045 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003046 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003047 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003048 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049
3050 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003051
David Woodhouse5040a912014-03-09 16:14:00 -07003052 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003053 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003054
David Woodhouse5040a912014-03-09 16:14:00 -07003055 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003056 if (!domain)
3057 return 0;
3058
Weidong Han8c11e792008-12-08 15:29:22 +08003059 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003060 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003061
David Woodhouse5040a912014-03-09 16:14:00 -07003062 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003063 if (!iova)
3064 goto error;
3065
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003066 /*
3067 * Check if DMAR supports zero-length reads on write-only
3068 * mappings.
3069 */
3070 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003071 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003072 prot |= DMA_PTE_READ;
3073 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3074 prot |= DMA_PTE_WRITE;
3075 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003076 * paddr .. (paddr + size) might span a partial page, so we map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003077 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003078 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003079 * is not a big problem
3080 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003081 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003082 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003083 if (ret)
3084 goto error;
3085
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003086 /* it's a non-present to present mapping. Only flush if caching mode */
3087 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003088 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003089 else
Weidong Han8c11e792008-12-08 15:29:22 +08003090 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003091
David Woodhouse03d6a242009-06-28 15:33:46 +01003092 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3093 start_paddr += paddr & ~PAGE_MASK;
3094 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003095
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003097 if (iova)
3098 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003099 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003100 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003101 return 0;
3102}
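/*
 * Worked example (illustrative, relying on the aligned_nrpages() helper
 * defined earlier in this file): mapping paddr 0x12345ffc with size 8
 * straddles a page boundary, so aligned_nrpages() yields 2 VT-d pages and
 * two contiguous IOVA pages are reserved and mapped.  The returned handle
 * is (iova->pfn_lo << PAGE_SHIFT) + 0xffc, i.e. the sub-page offset of the
 * original buffer is preserved in the DMA address.
 */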
3103
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003104static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3105 unsigned long offset, size_t size,
3106 enum dma_data_direction dir,
3107 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003108{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003109 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003110 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003111}
3112
mark gross5e0d2a62008-03-04 15:22:08 -08003113static void flush_unmaps(void)
3114{
mark gross80b20dd2008-04-18 13:53:58 -07003115 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003116
mark gross5e0d2a62008-03-04 15:22:08 -08003117 timer_on = 0;
3118
3119 /* just flush them all */
3120 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003121 struct intel_iommu *iommu = g_iommus[i];
3122 if (!iommu)
3123 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003124
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003125 if (!deferred_flush[i].next)
3126 continue;
3127
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003128 /* In caching mode, global flushes make emulation expensive */
3129 if (!cap_caching_mode(iommu->cap))
3130 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003131 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003132 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003133 unsigned long mask;
3134 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003135 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003136
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003137 /* On real hardware multiple invalidations are expensive */
3138 if (cap_caching_mode(iommu->cap))
3139 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003140 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003141 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003142 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003143 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003144 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3145 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3146 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003147 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003148 if (deferred_flush[i].freelist[j])
3149 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003150 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003151 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003152 }
3153
mark gross5e0d2a62008-03-04 15:22:08 -08003154 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003155}
3156
3157static void flush_unmaps_timeout(unsigned long data)
3158{
mark gross80b20dd2008-04-18 13:53:58 -07003159 unsigned long flags;
3160
3161 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003162 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003163 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003164}
3165
David Woodhouseea8ea462014-03-05 17:09:32 +00003166static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003167{
3168 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003169 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003170 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003171
3172 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003173 if (list_size == HIGH_WATER_MARK)
3174 flush_unmaps();
3175
Weidong Han8c11e792008-12-08 15:29:22 +08003176 iommu = domain_get_iommu(dom);
3177 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003178
mark gross80b20dd2008-04-18 13:53:58 -07003179 next = deferred_flush[iommu_id].next;
3180 deferred_flush[iommu_id].domain[next] = dom;
3181 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003182 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003183 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003184
3185 if (!timer_on) {
3186 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3187 timer_on = 1;
3188 }
3189 list_size++;
3190 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3191}
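/*
 * Illustrative note on the batching above (not part of the original
 * source): with intel_iommu_strict disabled, every unmap is parked in the
 * per-IOMMU deferred_flush table instead of being flushed immediately.
 * The table is drained by flush_unmaps() either when list_size reaches
 * HIGH_WATER_MARK or when the 10ms timer armed above fires, so on real
 * hardware one global IOTLB flush covers the whole batch of pending
 * unmaps, while caching-mode (virtualised) IOMMUs get cheaper per-range
 * flushes instead.
 */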
3192
Jiang Liud41a4ad2014-07-11 14:19:34 +08003193static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003194{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003195 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003196 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003197 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003198 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003199 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003200
David Woodhouse73676832009-07-04 14:08:36 +01003201 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003202 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003203
David Woodhouse1525a292014-03-06 16:19:30 +00003204 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003205 BUG_ON(!domain);
3206
Weidong Han8c11e792008-12-08 15:29:22 +08003207 iommu = domain_get_iommu(domain);
3208
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003209 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003210 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3211 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003212 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003213
David Woodhoused794dc92009-06-28 00:27:49 +01003214 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3215 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003216
David Woodhoused794dc92009-06-28 00:27:49 +01003217 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003218 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003219
David Woodhouseea8ea462014-03-05 17:09:32 +00003220 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003221
mark gross5e0d2a62008-03-04 15:22:08 -08003222 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003223 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003224 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003225 /* free iova */
3226 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003227 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003228 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003229 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003230 /*
3231 * queue up the release of the unmap to save the roughly 1/6th of
3232 * the cpu time otherwise used up by the iotlb flush operation...
3233 */
mark gross5e0d2a62008-03-04 15:22:08 -08003234 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003235}
3236
Jiang Liud41a4ad2014-07-11 14:19:34 +08003237static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3238 size_t size, enum dma_data_direction dir,
3239 struct dma_attrs *attrs)
3240{
3241 intel_unmap(dev, dev_addr);
3242}
3243
David Woodhouse5040a912014-03-09 16:14:00 -07003244static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003245 dma_addr_t *dma_handle, gfp_t flags,
3246 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247{
Akinobu Mita36746432014-06-04 16:06:51 -07003248 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003249 int order;
3250
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003251 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003253
David Woodhouse5040a912014-03-09 16:14:00 -07003254 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003255 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003256 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3257 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003258 flags |= GFP_DMA;
3259 else
3260 flags |= GFP_DMA32;
3261 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003262
Akinobu Mita36746432014-06-04 16:06:51 -07003263 if (flags & __GFP_WAIT) {
3264 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003265
Akinobu Mita36746432014-06-04 16:06:51 -07003266 page = dma_alloc_from_contiguous(dev, count, order);
3267 if (page && iommu_no_mapping(dev) &&
3268 page_to_phys(page) + size > dev->coherent_dma_mask) {
3269 dma_release_from_contiguous(dev, page, count);
3270 page = NULL;
3271 }
3272 }
3273
3274 if (!page)
3275 page = alloc_pages(flags, order);
3276 if (!page)
3277 return NULL;
3278 memset(page_address(page), 0, size);
3279
3280 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003281 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003282 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003283 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003284 return page_address(page);
3285 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3286 __free_pages(page, order);
3287
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003288 return NULL;
3289}
3290
David Woodhouse5040a912014-03-09 16:14:00 -07003291static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003292 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003293{
3294 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003295 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003297 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298 order = get_order(size);
3299
Jiang Liud41a4ad2014-07-11 14:19:34 +08003300 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003301 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3302 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003303}
3304
David Woodhouse5040a912014-03-09 16:14:00 -07003305static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003306 int nelems, enum dma_data_direction dir,
3307 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003309 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003310}
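/*
 * Note (illustrative, inferred from the surrounding code): passing only
 * sglist[0].dma_address is enough here because intel_map_sg() below
 * allocates one contiguous IOVA range for the whole scatterlist;
 * intel_unmap() then recovers the full extent via find_iova() and tears
 * down everything from iova->pfn_lo to iova->pfn_hi in one go.
 */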
3311
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003312static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003313 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314{
3315 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003316 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003317
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003318 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003319 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003320 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003321 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003322 }
3323 return nelems;
3324}
3325
David Woodhouse5040a912014-03-09 16:14:00 -07003326static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003327 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003328{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003329 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003330 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003331 size_t size = 0;
3332 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003333 struct iova *iova = NULL;
3334 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003335 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003336 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003337 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003338
3339 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003340 if (iommu_no_mapping(dev))
3341 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003342
David Woodhouse5040a912014-03-09 16:14:00 -07003343 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003344 if (!domain)
3345 return 0;
3346
Weidong Han8c11e792008-12-08 15:29:22 +08003347 iommu = domain_get_iommu(domain);
3348
David Woodhouseb536d242009-06-28 14:49:31 +01003349 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003350 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003351
David Woodhouse5040a912014-03-09 16:14:00 -07003352 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3353 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003354 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003355 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003356 return 0;
3357 }
3358
3359 /*
3360 * Check if DMAR supports zero-length reads on write-only
3361 * mappings.
3362 */
3363 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003364 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003365 prot |= DMA_PTE_READ;
3366 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3367 prot |= DMA_PTE_WRITE;
3368
David Woodhouseb536d242009-06-28 14:49:31 +01003369 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003370
Fenghua Yuf5329592009-08-04 15:09:37 -07003371 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003372 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003373 dma_pte_free_pagetable(domain, start_vpfn,
3374 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003375 __free_iova(&domain->iovad, iova);
3376 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003377 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003378
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003379 /* it's a non-present to present mapping. Only flush if caching mode */
3380 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003381 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003382 else
Weidong Han8c11e792008-12-08 15:29:22 +08003383 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003384
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003385 return nelems;
3386}
3387
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003388static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3389{
3390 return !dma_addr;
3391}
3392
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003393struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003394 .alloc = intel_alloc_coherent,
3395 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003396 .map_sg = intel_map_sg,
3397 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003398 .map_page = intel_map_page,
3399 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003400 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003401};
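/*
 * Usage sketch (illustrative only, not part of this file): once
 * intel_dma_ops is installed for a device, a driver calling the generic
 * DMA API, e.g.
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *
 * ends up in intel_map_page() above, and the matching dma_unmap_single()
 * call reaches intel_unmap_page().
 */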
3402
3403static inline int iommu_domain_cache_init(void)
3404{
3405 int ret = 0;
3406
3407 iommu_domain_cache = kmem_cache_create("iommu_domain",
3408 sizeof(struct dmar_domain),
3409 0,
3410 SLAB_HWCACHE_ALIGN,
3411
3412 NULL);
3413 if (!iommu_domain_cache) {
3414 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3415 ret = -ENOMEM;
3416 }
3417
3418 return ret;
3419}
3420
3421static inline int iommu_devinfo_cache_init(void)
3422{
3423 int ret = 0;
3424
3425 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3426 sizeof(struct device_domain_info),
3427 0,
3428 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003429 NULL);
3430 if (!iommu_devinfo_cache) {
3431 printk(KERN_ERR "Couldn't create devinfo cache\n");
3432 ret = -ENOMEM;
3433 }
3434
3435 return ret;
3436}
3437
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003438static int __init iommu_init_mempool(void)
3439{
3440 int ret;
3441 ret = iommu_iova_cache_init();
3442 if (ret)
3443 return ret;
3444
3445 ret = iommu_domain_cache_init();
3446 if (ret)
3447 goto domain_error;
3448
3449 ret = iommu_devinfo_cache_init();
3450 if (!ret)
3451 return ret;
3452
3453 kmem_cache_destroy(iommu_domain_cache);
3454domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003455 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003456
3457 return -ENOMEM;
3458}
3459
3460static void __init iommu_exit_mempool(void)
3461{
3462 kmem_cache_destroy(iommu_devinfo_cache);
3463 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003464 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003465}
3466
Dan Williams556ab452010-07-23 15:47:56 -07003467static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3468{
3469 struct dmar_drhd_unit *drhd;
3470 u32 vtbar;
3471 int rc;
3472
3473 /* We know that this device on this chipset has its own IOMMU.
3474 * If we find it under a different IOMMU, then the BIOS is lying
3475 * to us. Hope that the IOMMU for this device is actually
3476 * disabled, and it needs no translation...
3477 */
3478 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3479 if (rc) {
3480 /* "can't" happen */
3481 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3482 return;
3483 }
3484 vtbar &= 0xffff0000;
3485
3486 /* we know that this iommu should be at offset 0xa000 from vtbar */
3487 drhd = dmar_find_matched_drhd_unit(pdev);
3488 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3489 TAINT_FIRMWARE_WORKAROUND,
3490 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3491 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3492}
3493DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3494
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495static void __init init_no_remapping_devices(void)
3496{
3497 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003498 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003499 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003500
3501 for_each_drhd_unit(drhd) {
3502 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003503 for_each_active_dev_scope(drhd->devices,
3504 drhd->devices_cnt, i, dev)
3505 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003506 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003507 if (i == drhd->devices_cnt)
3508 drhd->ignored = 1;
3509 }
3510 }
3511
Jiang Liu7c919772014-01-06 14:18:18 +08003512 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003513 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514 continue;
3515
Jiang Liub683b232014-02-19 14:07:32 +08003516 for_each_active_dev_scope(drhd->devices,
3517 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003518 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003519 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003520 if (i < drhd->devices_cnt)
3521 continue;
3522
David Woodhousec0771df2011-10-14 20:59:46 +01003523 /* This IOMMU has *only* gfx devices. Either bypass it or
3524 set the gfx_mapped flag, as appropriate */
3525 if (dmar_map_gfx) {
3526 intel_iommu_gfx_mapped = 1;
3527 } else {
3528 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003529 for_each_active_dev_scope(drhd->devices,
3530 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003531 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003532 }
3533 }
3534}
3535
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003536#ifdef CONFIG_SUSPEND
3537static int init_iommu_hw(void)
3538{
3539 struct dmar_drhd_unit *drhd;
3540 struct intel_iommu *iommu = NULL;
3541
3542 for_each_active_iommu(iommu, drhd)
3543 if (iommu->qi)
3544 dmar_reenable_qi(iommu);
3545
Joseph Cihulab7792602011-05-03 00:08:37 -07003546 for_each_iommu(iommu, drhd) {
3547 if (drhd->ignored) {
3548 /*
3549 * we always have to disable PMRs or DMA may fail on
3550 * this device
3551 */
3552 if (force_on)
3553 iommu_disable_protect_mem_regions(iommu);
3554 continue;
3555 }
3556
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003557 iommu_flush_write_buffer(iommu);
3558
3559 iommu_set_root_entry(iommu);
3560
3561 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003562 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003563 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3564 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003565 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003566 }
3567
3568 return 0;
3569}
3570
3571static void iommu_flush_all(void)
3572{
3573 struct dmar_drhd_unit *drhd;
3574 struct intel_iommu *iommu;
3575
3576 for_each_active_iommu(iommu, drhd) {
3577 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003578 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003579 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003580 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003581 }
3582}
3583
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003584static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003585{
3586 struct dmar_drhd_unit *drhd;
3587 struct intel_iommu *iommu = NULL;
3588 unsigned long flag;
3589
3590 for_each_active_iommu(iommu, drhd) {
3591 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3592 GFP_ATOMIC);
3593 if (!iommu->iommu_state)
3594 goto nomem;
3595 }
3596
3597 iommu_flush_all();
3598
3599 for_each_active_iommu(iommu, drhd) {
3600 iommu_disable_translation(iommu);
3601
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003602 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003603
3604 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3605 readl(iommu->reg + DMAR_FECTL_REG);
3606 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3607 readl(iommu->reg + DMAR_FEDATA_REG);
3608 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3609 readl(iommu->reg + DMAR_FEADDR_REG);
3610 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3611 readl(iommu->reg + DMAR_FEUADDR_REG);
3612
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003613 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003614 }
3615 return 0;
3616
3617nomem:
3618 for_each_active_iommu(iommu, drhd)
3619 kfree(iommu->iommu_state);
3620
3621 return -ENOMEM;
3622}
3623
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003624static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003625{
3626 struct dmar_drhd_unit *drhd;
3627 struct intel_iommu *iommu = NULL;
3628 unsigned long flag;
3629
3630 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003631 if (force_on)
3632 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3633 else
3634 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003635 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003636 }
3637
3638 for_each_active_iommu(iommu, drhd) {
3639
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003640 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003641
3642 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3643 iommu->reg + DMAR_FECTL_REG);
3644 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3645 iommu->reg + DMAR_FEDATA_REG);
3646 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3647 iommu->reg + DMAR_FEADDR_REG);
3648 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3649 iommu->reg + DMAR_FEUADDR_REG);
3650
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003651 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003652 }
3653
3654 for_each_active_iommu(iommu, drhd)
3655 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656}
3657
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003658static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003659 .resume = iommu_resume,
3660 .suspend = iommu_suspend,
3661};
3662
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003663static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003664{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003665 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003666}
3667
3668#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003669static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003670#endif /* CONFIG_PM */
3671
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003672
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;
        rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
                                ((void *)rmrr) + rmrr->header.length,
                                &rmrru->devices_cnt);
        if (rmrru->devices_cnt && rmrru->devices == NULL) {
                kfree(rmrru);
                return -ENOMEM;
        }

        list_add(&rmrru->list, &dmar_rmrr_units);

        return 0;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
        struct dmar_atsr_unit *atsru;
        struct acpi_dmar_atsr *tmp;

        list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
                tmp = (struct acpi_dmar_atsr *)atsru->hdr;
                if (atsr->segment != tmp->segment)
                        continue;
                if (atsr->header.length != tmp->header.length)
                        continue;
                if (memcmp(atsr, tmp, atsr->header.length) == 0)
                        return atsru;
        }

        return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
                return 0;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = dmar_find_atsr(atsr);
        if (atsru)
                return 0;

        atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        /*
         * If the memory was allocated from the slab by an ACPI _DSM method,
         * we need to copy its content because that buffer will be freed on
         * return.
         */
        atsru->hdr = (void *)(atsru + 1);
        memcpy(atsru->hdr, hdr, hdr->length);
        atsru->include_all = atsr->flags & 0x1;
        if (!atsru->include_all) {
                atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
                                        (void *)atsr + atsr->header.length,
                                        &atsru->devices_cnt);
                if (atsru->devices_cnt && atsru->devices == NULL) {
                        kfree(atsru);
                        return -ENOMEM;
                }
        }

        list_add_rcu(&atsru->list, &dmar_atsr_units);

        return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
        dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
        kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = dmar_find_atsr(atsr);
        if (atsru) {
                list_del_rcu(&atsru->list);
                synchronize_rcu();
                intel_iommu_free_atsr(atsru);
        }

        return 0;
}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
        int i;
        struct device *dev;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = dmar_find_atsr(atsr);
        if (!atsru)
                return 0;

        if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
                for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
                                          i, dev)
                        return -EBUSY;

        return 0;
}

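/*
 * intel_iommu_add() brings a hot-added DMAR unit into service.  The rough
 * sequence, mirroring what init_dmars() does for the boot-time units, is:
 *
 *   check required capabilities (pass-through, snooping, superpage)
 *     -> iommu_init_domains() / iommu_alloc_root_entry()
 *     -> intel_iommu_init_qi() + dmar_set_interrupt()
 *     -> iommu_set_root_entry(), global context and IOTLB flush
 *     -> iommu_enable_translation()
 *     -> attach to si_domain if the static identity map is in use
 */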
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
        int sp, ret = 0;
        struct intel_iommu *iommu = dmaru->iommu;

        if (g_iommus[iommu->seq_id])
                return 0;

        if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
                pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
                        iommu->name);
                return -ENXIO;
        }
        if (!ecap_sc_support(iommu->ecap) &&
            domain_update_iommu_snooping(iommu)) {
                pr_warn("IOMMU: %s doesn't support snooping.\n",
                        iommu->name);
                return -ENXIO;
        }
        sp = domain_update_iommu_superpage(iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
                pr_warn("IOMMU: %s doesn't support large page.\n",
                        iommu->name);
                return -ENXIO;
        }

        /*
         * Disable translation if already enabled prior to OS handover.
         */
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        g_iommus[iommu->seq_id] = iommu;
        ret = iommu_init_domains(iommu);
        if (ret == 0)
                ret = iommu_alloc_root_entry(iommu);
        if (ret)
                goto out;

        if (dmaru->ignored) {
                /*
                 * we always have to disable PMRs or DMA may fail on this device
                 */
                if (force_on)
                        iommu_disable_protect_mem_regions(iommu);
                return 0;
        }

        intel_iommu_init_qi(iommu);
        iommu_flush_write_buffer(iommu);
        ret = dmar_set_interrupt(iommu);
        if (ret)
                goto disable_iommu;

        iommu_set_root_entry(iommu);
        iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        iommu_enable_translation(iommu);

        if (si_domain) {
                ret = iommu_attach_domain(si_domain, iommu);
                if (ret < 0 || si_domain->id != ret)
                        goto disable_iommu;
                domain_attach_iommu(si_domain, iommu);
        }

        iommu_disable_protect_mem_regions(iommu);
        return 0;

disable_iommu:
        disable_dmar_iommu(iommu);
out:
        free_dmar_iommu(iommu);
        return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
        int ret = 0;
        struct intel_iommu *iommu = dmaru->iommu;

        if (!intel_iommu_enabled)
                return 0;
        if (iommu == NULL)
                return -EINVAL;

        if (insert) {
                ret = intel_iommu_add(dmaru);
        } else {
                disable_dmar_iommu(iommu);
                free_dmar_iommu(iommu);
        }

        return ret;
}

static void intel_iommu_free_dmars(void)
{
        struct dmar_rmrr_unit *rmrru, *rmrr_n;
        struct dmar_atsr_unit *atsru, *atsr_n;

        list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
                list_del(&rmrru->list);
                dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
                kfree(rmrru);
        }

        list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
                list_del(&atsru->list);
                intel_iommu_free_atsr(atsru);
        }
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i, ret = 1;
        struct pci_bus *bus;
        struct pci_dev *bridge = NULL;
        struct device *tmp;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        dev = pci_physfn(dev);
        for (bus = dev->bus; bus; bus = bus->parent) {
                bridge = bus->self;
                if (!bridge || !pci_is_pcie(bridge) ||
                    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
                        break;
        }
        if (!bridge)
                return 0;

        rcu_read_lock();
        list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment != pci_domain_nr(dev->bus))
                        continue;

                for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
                        if (tmp == &bridge->dev)
                                goto out;

                if (atsru->include_all)
                        goto out;
        }
        ret = 0;
out:
        rcu_read_unlock();

        return ret;
}

int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_rmrr_unit *rmrru;
        struct dmar_atsr_unit *atsru;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_reserved_memory *rmrr;

        if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
                return 0;

        list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
                rmrr = container_of(rmrru->hdr,
                                    struct acpi_dmar_reserved_memory, header);
                if (info->event == BUS_NOTIFY_ADD_DEVICE) {
                        ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
                                ((void *)rmrr) + rmrr->header.length,
                                rmrr->segment, rmrru->devices,
                                rmrru->devices_cnt);
                        if (ret < 0)
                                return ret;
                } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
                        dmar_remove_dev_scope(info, rmrr->segment,
                                rmrru->devices, rmrru->devices_cnt);
                }
        }

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                if (atsru->include_all)
                        continue;

                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (info->event == BUS_NOTIFY_ADD_DEVICE) {
                        ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
                                        (void *)atsr + atsr->header.length,
                                        atsr->segment, atsru->devices,
                                        atsru->devices_cnt);
                        if (ret > 0)
                                break;
                        else if (ret < 0)
                                return ret;
                } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
                        if (dmar_remove_dev_scope(info, atsr->segment,
                                        atsru->devices, atsru->devices_cnt))
                                break;
                }
        }

        return 0;
}

/*
 * Here we only respond to a device being removed (BUS_NOTIFY_REMOVED_DEVICE).
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
                           unsigned long action, void *data)
{
        struct device *dev = data;
        struct dmar_domain *domain;

        if (iommu_dummy(dev))
                return 0;

        if (action != BUS_NOTIFY_REMOVED_DEVICE)
                return 0;

        domain = find_domain(dev);
        if (!domain)
                return 0;

        down_read(&dmar_global_lock);
        domain_remove_one_dev_info(domain, dev);
        if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
                domain_exit(domain);
        up_read(&dmar_global_lock);

        return 0;
}

static struct notifier_block device_nb = {
        .notifier_call = device_notifier,
};

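/*
 * Memory hotplug handling for the static identity (si_domain) map: a range
 * that is about to come online is added to the 1:1 map, and a range that
 * goes offline (or fails to come online) is unmapped, its IOTLB entries are
 * flushed on every active IOMMU, and its IOVA and page-table pages are
 * freed.
 */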
static int intel_iommu_memory_notifier(struct notifier_block *nb,
                                       unsigned long val, void *v)
{
        struct memory_notify *mhp = v;
        unsigned long long start, end;
        unsigned long start_vpfn, last_vpfn;

        switch (val) {
        case MEM_GOING_ONLINE:
                start = mhp->start_pfn << PAGE_SHIFT;
                end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
                if (iommu_domain_identity_map(si_domain, start, end)) {
                        pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
                                start, end);
                        return NOTIFY_BAD;
                }
                break;

        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
                last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
                while (start_vpfn <= last_vpfn) {
                        struct iova *iova;
                        struct dmar_drhd_unit *drhd;
                        struct intel_iommu *iommu;
                        struct page *freelist;

                        iova = find_iova(&si_domain->iovad, start_vpfn);
                        if (iova == NULL) {
                                pr_debug("dmar: failed get IOVA for PFN %lx\n",
                                         start_vpfn);
                                break;
                        }

                        iova = split_and_remove_iova(&si_domain->iovad, iova,
                                                     start_vpfn, last_vpfn);
                        if (iova == NULL) {
                                pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
                                        start_vpfn, last_vpfn);
                                return NOTIFY_BAD;
                        }

                        freelist = domain_unmap(si_domain, iova->pfn_lo,
                                                iova->pfn_hi);

                        rcu_read_lock();
                        for_each_active_iommu(iommu, drhd)
                                iommu_flush_iotlb_psi(iommu, si_domain->id,
                                        iova->pfn_lo, iova_size(iova),
                                        !freelist, 0);
                        rcu_read_unlock();
                        dma_free_pagelist(freelist);

                        start_vpfn = iova->pfn_hi + 1;
                        free_iova_mem(iova);
                }
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
        .notifier_call = intel_iommu_memory_notifier,
        .priority = 0
};

static ssize_t intel_iommu_show_version(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct intel_iommu *iommu = dev_get_drvdata(dev);
        u32 ver = readl(iommu->reg + DMAR_VER_REG);
        return sprintf(buf, "%d:%d\n",
                       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct intel_iommu *iommu = dev_get_drvdata(dev);
        return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct intel_iommu *iommu = dev_get_drvdata(dev);
        return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct intel_iommu *iommu = dev_get_drvdata(dev);
        return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_address.attr,
        &dev_attr_cap.attr,
        &dev_attr_ecap.attr,
        NULL,
};

static struct attribute_group intel_iommu_group = {
        .name = "intel-iommu",
        .attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
        &intel_iommu_group,
        NULL,
};

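/*
 * The attributes above are exported through the iommu class device that is
 * registered per DMAR unit in intel_iommu_init() below, so on a typical
 * system they show up under sysfs roughly as:
 *
 *   /sys/class/iommu/dmar0/intel-iommu/version
 *   /sys/class/iommu/dmar0/intel-iommu/address
 *   /sys/class/iommu/dmar0/intel-iommu/cap
 *   /sys/class/iommu/dmar0/intel-iommu/ecap
 *
 * (the device name follows iommu->name, i.e. "dmar0", "dmar1", ...)
 */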
int __init intel_iommu_init(void)
{
        int ret = -ENODEV;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* VT-d is required for a TXT/tboot launch, so enforce that */
        force_on = tboot_force_iommu();

        if (iommu_init_mempool()) {
                if (force_on)
                        panic("tboot: Failed to initialize iommu memory\n");
                return -ENOMEM;
        }

        down_write(&dmar_global_lock);
        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
                goto out_free_dmar;
        }

        /*
         * Disable translation if already enabled prior to OS handover.
         */
        for_each_active_iommu(iommu, drhd)
                if (iommu->gcmd & DMA_GCMD_TE)
                        iommu_disable_translation(iommu);

        if (dmar_dev_scope_init() < 0) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR device scope\n");
                goto out_free_dmar;
        }

        if (no_iommu || dmar_disabled)
                goto out_free_dmar;

        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO "DMAR: No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO "DMAR: No ATSR found\n");

        if (dmar_init_reserved_ranges()) {
                if (force_on)
                        panic("tboot: Failed to reserve iommu ranges\n");
                goto out_free_reserved_range;
        }

        init_no_remapping_devices();

        ret = init_dmars();
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
                printk(KERN_ERR "IOMMU: dmar init failed\n");
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
        printk(KERN_INFO
               "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

        init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
        swiotlb = 0;
#endif
        dma_ops = &intel_dma_ops;

        init_iommu_pm_ops();

        for_each_active_iommu(iommu, drhd)
                iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                       intel_iommu_groups,
                                                       iommu->name);

        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        bus_register_notifier(&pci_bus_type, &device_nb);
        if (si_domain && !hw_pass_through)
                register_memory_notifier(&intel_iommu_memory_nb);

        intel_iommu_enabled = 1;

        return 0;

out_free_reserved_range:
        put_iova_domain(&reserved_iova_list);
out_free_dmar:
        intel_iommu_free_dmars();
        up_write(&dmar_global_lock);
        iommu_exit_mempool();
        return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
        return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev)
{
        if (!iommu || !dev || !dev_is_pci(dev))
                return;

        pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev)
{
        struct device_domain_info *info, *tmp;
        struct intel_iommu *iommu;
        unsigned long flags;
        bool found = false;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link) {
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        unlink_domain_info(info);
                        spin_unlock_irqrestore(&device_domain_lock, flags);

                        iommu_disable_dev_iotlb(info);
                        iommu_detach_dev(iommu, info->bus, info->devfn);
                        iommu_detach_dependent_devices(iommu, dev);
                        free_devinfo_mem(info);

                        spin_lock_irqsave(&device_domain_lock, flags);

                        if (found)
                                break;
                        else
                                continue;
                }

                /*
                 * If there are no other devices under the same iommu owned
                 * by this domain, clear this iommu from iommu_bmp and update
                 * the iommu count and coherency.
                 */
                if (info->iommu == iommu)
                        found = true;
        }

        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found) {
                domain_detach_iommu(domain, iommu);
                if (!domain_type_is_vm_or_si(domain))
                        iommu_detach_domain(domain, iommu);
        }
}

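/*
 * md_domain_init() sets up a dmar_domain that was allocated on behalf of
 * the generic IOMMU API (see intel_iommu_domain_alloc() below): it gets its
 * own IOVA allocator, a guest address width chosen by the caller and a
 * freshly allocated top-level page directory, but no IOMMU is attached to
 * it until a device is attached.
 */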
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
        int adjust_width;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        domain->agaw = width_to_agaw(adjust_width);

        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->iommu_superpage = 0;
        domain->max_addr = 0;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
        if (!dmar_domain) {
                printk(KERN_ERR
                       "intel_iommu_domain_alloc: dmar_domain == NULL\n");
                return NULL;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
                       "intel_iommu_domain_alloc: md_domain_init() failed\n");
                domain_exit(dmar_domain);
                return NULL;
        }
        domain_update_iommu_cap(dmar_domain);

        domain = &dmar_domain->domain;
        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
        domain->geometry.force_aperture = true;

        return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
        domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct intel_iommu *iommu;
        int addr_width;
        u8 bus, devfn;

        if (device_is_rmrr_locked(dev)) {
                dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
                return -EPERM;
        }

        /* normally dev is not mapped */
        if (unlikely(domain_context_mapped(dev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(dev);
                if (old_domain) {
                        if (domain_type_is_vm_or_si(dmar_domain))
                                domain_remove_one_dev_info(old_domain, dev);
                        else
                                domain_remove_dev_info(old_domain);

                        if (!domain_type_is_vm_or_si(old_domain) &&
                            list_empty(&old_domain->devices))
                                domain_exit(old_domain);
                }
        }

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                printk(KERN_ERR "%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        u64 max_addr;
        int prot = 0;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        printk(KERN_ERR "%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /* Round up size to next multiple of PAGE_SIZE, if it and
           the low bits of hpa would take us onto the next page */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
                                unsigned long iova, size_t size)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct page *freelist = NULL;
        struct intel_iommu *iommu;
        unsigned long start_pfn, last_pfn;
        unsigned int npages;
        int iommu_id, num, ndomains, level = 0;

        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
        if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
                BUG();

        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);

        start_pfn = iova >> VTD_PAGE_SHIFT;
        last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

        freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

        npages = last_pfn - start_pfn + 1;

        for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
                iommu = g_iommus[iommu_id];

                /*
                 * find bit position of dmar_domain
                 */
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == dmar_domain)
                                iommu_flush_iotlb_psi(iommu, num, start_pfn,
                                                      npages, !freelist, 0);
                }
        }

        dma_free_pagelist(freelist);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct dma_pte *pte;
        int level = 0;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return domain_update_iommu_snooping(NULL) == 1;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return irq_remapping_enabled == 1;

        return false;
}

static int intel_iommu_add_device(struct device *dev)
{
        struct intel_iommu *iommu;
        struct iommu_group *group;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        iommu_device_link(iommu->iommu_dev, dev);

        group = iommu_group_get_for_dev(dev);

        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return;

        iommu_group_remove_device(dev);

        iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
        .capable        = intel_iommu_capable,
        .domain_alloc   = intel_iommu_domain_alloc,
        .domain_free    = intel_iommu_domain_free,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
        .map            = intel_iommu_map,
        .unmap          = intel_iommu_unmap,
        .map_sg         = default_iommu_map_sg,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .add_device     = intel_iommu_add_device,
        .remove_device  = intel_iommu_remove_device,
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
};

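/*
 * Illustrative sketch (not taken from this driver): a kernel consumer such
 * as VFIO reaches the callbacks in intel_iommu_ops above through the
 * generic IOMMU API, roughly like this, where pdev stands for some PCI
 * device the caller owns and iova/phys are addresses of its choosing.
 * iommu_map()/iommu_unmap() dispatch to intel_iommu_map()/intel_iommu_unmap():
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *      if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *              iommu_map(dom, iova, phys, PAGE_SIZE,
 *                        IOMMU_READ | IOMMU_WRITE);
 *              ...
 *              iommu_unmap(dom, iova, PAGE_SIZE);
 *              iommu_detach_device(dom, &pdev->dev);
 *      }
 *      if (dom)
 *              iommu_domain_free(dom);
 */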
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
        printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)

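/*
 * GGC is the graphics control register in the config space of the host
 * bridges matched by the DECLARE_PCI_FIXUP entries below; the *_VT_* values
 * indicate whether the BIOS set aside a separate (shadow) GTT for VT-d use,
 * which is what quirk_calpella_no_shadow_gtt() checks.
 */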
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /* System Management Registers. Might be hidden, in which case
           we can't do the sanity check. But that's OK, because the
           known-broken BIOSes _don't_ actually hide it, so far. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
               vtisochctrl);
}