author     Linus Torvalds <torvalds@linux-foundation.org>   2009-12-05 09:49:07 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-05 09:49:07 -0800
commit     7b626acb8f983eb83b396ab96cc24b18d635d487 (patch)
tree       8c3320191311e6186d3aa722f1acd12acd47ece8 /lib
parent     1ebb275afcf5a47092e995541d6c604eef96062a (diff)
parent     4528752f49c1f4025473d12bc5fa9181085c3f22 (diff)
Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (63 commits)
  x86, Calgary IOMMU quirk: Find nearest matching Calgary while walking up the PCI tree
  x86/amd-iommu: Remove amd_iommu_pd_table
  x86/amd-iommu: Move reset_iommu_command_buffer out of locked code
  x86/amd-iommu: Cleanup DTE flushing code
  x86/amd-iommu: Introduce iommu_flush_device() function
  x86/amd-iommu: Cleanup attach/detach_device code
  x86/amd-iommu: Keep devices per domain in a list
  x86/amd-iommu: Add device bind reference counting
  x86/amd-iommu: Use dev->arch->iommu to store iommu related information
  x86/amd-iommu: Remove support for domain sharing
  x86/amd-iommu: Rearrange dma_ops related functions
  x86/amd-iommu: Move some pte allocation functions in the right section
  x86/amd-iommu: Remove iommu parameter from dma_ops_domain_alloc
  x86/amd-iommu: Use get_device_id and check_device where appropriate
  x86/amd-iommu: Move find_protection_domain to helper functions
  x86/amd-iommu: Simplify get_device_resources()
  x86/amd-iommu: Let domain_for_device handle aliases
  x86/amd-iommu: Remove iommu specific handling from dma_ops path
  x86/amd-iommu: Remove iommu parameter from __(un)map_single
  x86/amd-iommu: Make alloc_new_range aware of multiple IOMMUs
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/swiotlb.c | 46
1 file changed, 39 insertions(+), 7 deletions(-)
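In short, the lib/swiotlb.c hunks below give swiotlb_init() a verbose flag, route the banner through a parameterless swiotlb_print_info(), and add swiotlb_free() plus a late_alloc flag so the bounce buffers can be released again with free_pages() or free_bootmem_late() depending on how they were allocated. A rough sketch of how architecture code might drive this interface follows; arch_swiotlb_setup(), arch_swiotlb_cleanup() and the swiotlb_needed flag are hypothetical names used only for illustration, and only swiotlb_init(), swiotlb_print_info() and swiotlb_free() come from the patch itself:

#include <linux/init.h>
#include <linux/swiotlb.h>

/* Hypothetical arch glue, sketched against the API introduced by this patch. */
static int swiotlb_needed __initdata;

void __init arch_swiotlb_setup(int hw_iommu_found)
{
	/*
	 * Reserve the bounce buffers early; print the placement banner
	 * only when they will actually be used (verbose flag added by
	 * this patch).
	 */
	swiotlb_needed = !hw_iommu_found;
	swiotlb_init(swiotlb_needed);
}

void __init arch_swiotlb_cleanup(void)
{
	/*
	 * If a hardware IOMMU ended up handling DMA, hand the memory
	 * back; swiotlb_free() picks free_bootmem_late() or free_pages()
	 * based on the new late_alloc flag.
	 */
	if (!swiotlb_needed)
		swiotlb_free();
}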
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e80..795472d8ae2 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
*/
static DEFINE_SPINLOCK(io_tlb_lock);
+static int late_alloc;
+
static int __init
setup_io_tlb_npages(char *str)
{
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
++str;
if (!strcmp(str, "force"))
swiotlb_force = 1;
+
return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
return phys_to_dma(hwdev, virt_to_phys(address));
}
-static void swiotlb_print_info(unsigned long bytes)
+void swiotlb_print_info(void)
{
+ unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
phys_addr_t pstart, pend;
pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
-swiotlb_init_with_default_size(size_t default_size)
+swiotlb_init_with_default_size(size_t default_size, int verbose)
{
unsigned long i, bytes;
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
-
- swiotlb_print_info(bytes);
+ if (verbose)
+ swiotlb_print_info();
}
void __init
-swiotlb_init(void)
+swiotlb_init(int verbose)
{
- swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
}
/*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- swiotlb_print_info(bytes);
+ swiotlb_print_info();
+
+ late_alloc = 1;
return 0;
@@ -281,6 +287,32 @@ cleanup1:
return -ENOMEM;
}
+void __init swiotlb_free(void)
+{
+ if (!io_tlb_overflow_buffer)
+ return;
+
+ if (late_alloc) {
+ free_pages((unsigned long)io_tlb_overflow_buffer,
+ get_order(io_tlb_overflow));
+ free_pages((unsigned long)io_tlb_orig_addr,
+ get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+ free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+ sizeof(int)));
+ free_pages((unsigned long)io_tlb_start,
+ get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+ } else {
+ free_bootmem_late(__pa(io_tlb_overflow_buffer),
+ io_tlb_overflow);
+ free_bootmem_late(__pa(io_tlb_orig_addr),
+ io_tlb_nslabs * sizeof(phys_addr_t));
+ free_bootmem_late(__pa(io_tlb_list),
+ io_tlb_nslabs * sizeof(int));
+ free_bootmem_late(__pa(io_tlb_start),
+ io_tlb_nslabs << IO_TLB_SHIFT);
+ }
+}
+
static int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= virt_to_phys(io_tlb_start) &&