author     Liam Mark <lmark@codeaurora.org>    2020-02-26 14:23:41 -0800
committer  Todd Kjos <tkjos@google.com>        2020-03-04 09:01:27 -0800
commit     6a3ea3c63c4e9587c8340053403ab4762ab6cd91 (patch)
tree       b6ddf8c1735f476673b8ddff9fb885acd10be874
parent     5e25537af702fdc66c123f3d563b7352bbf10c83 (diff)

FROMLIST: iommu/iova: Support limiting IOVA alignment (tag: ASB-2020-03-05_5.4)
When the IOVA framework applies IOVA alignment, it aligns all IOVAs to
the smallest PAGE_SIZE order which is greater than or equal to the
requested IOVA size. We support use cases that require large buffers
(> 64 MB in size) to be allocated and mapped in their stage 1 page
tables. However, with this alignment scheme we find ourselves running
out of IOVA space for 32-bit devices, so we are proposing this config,
in a similar vein to CONFIG_CMA_ALIGNMENT for CMA allocations.

Add CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT to limit the alignment of IOVAs
to some desired PAGE_SIZE order, specified by
CONFIG_IOMMU_IOVA_ALIGNMENT. This helps reduce the impact of
fragmentation caused by the current IOVA alignment scheme, and gives
better IOVA space utilization.

Bug: 148141615
Link: https://lore.kernel.org/lkml/alpine.DEB.2.10.2002141223510.27047@lmark-linux.qualcomm.com/
Change-Id: I511ac685d5855e1b9feb5025e025ebbebee7f40d
Signed-off-by: Liam Mark <lmark@codeaurora.org>
 drivers/iommu/Kconfig | 30 ++++++++++++++++++++++++++++++
 drivers/iommu/iova.c  | 22 ++++++++++++++++++++--
 2 files changed, 50 insertions(+), 2 deletions(-)
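To make the commit message's arithmetic concrete, here is a rough user-space
sketch (not part of the patch): it assumes 4 KiB pages (PAGE_SHIFT = 12) and
the order-9 cap that the Kconfig below uses as its default, and it
reimplements fls_long(), which is otherwise kernel-only.

#include <stdio.h>

#define PAGE_SHIFT 12UL			/* assumption: 4 KiB pages */
#define IOVA_ALIGNMENT_ORDER 9UL	/* assumed CONFIG_IOMMU_IOVA_ALIGNMENT default */

/* User-space stand-in for the kernel's fls_long(): 1-based index of the
 * highest set bit, i.e. the alignment order the allocator derives from
 * fls_long(size - 1). */
static unsigned long fls_long(unsigned long x)
{
	unsigned long r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long bytes = 65UL << 20;		/* a 65 MiB buffer */
	unsigned long pages = bytes >> PAGE_SHIFT;	/* size in IOVA pages */
	unsigned long shift = fls_long(pages - 1);	/* uncapped alignment order */
	unsigned long capped = shift < IOVA_ALIGNMENT_ORDER ?
				shift : IOVA_ALIGNMENT_ORDER;

	printf("uncapped alignment: %lu MiB\n", 1UL << (shift + PAGE_SHIFT - 20));
	printf("capped alignment:   %lu MiB\n", 1UL << (capped + PAGE_SHIFT - 20));
	return 0;
}

Under these assumptions a 65 MiB request must start on a 128 MiB boundary by
default, so a 4 GiB (32-bit) IOVA space holds only a few dozen such buffers
before alignment gaps exhaust it; with the cap the same request only needs a
2 MiB boundary.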
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fc55f7ba0d18..ba8b3e2138f9 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -7,6 +7,36 @@ config IOMMU_IOVA
config IOMMU_API
bool

+if IOMMU_IOVA
+
+config IOMMU_LIMIT_IOVA_ALIGNMENT
+ bool "Limit IOVA alignment"
+ help
+ When the IOVA framework applies IOVA alignment, it aligns all
+ IOVAs to the smallest PAGE_SIZE order which is greater than or
+ equal to the requested IOVA size. This works fine for sizes up
+ to several MiB, but for larger sizes it results in address
+ space wastage and fragmentation. For example, drivers with a 4
+ GiB IOVA space might run out of IOVA space when allocating
+ buffers greater than 64 MiB.
+
+ Enable this option to impose a limit on the alignment of IOVAs.
+
+ If unsure, say N.
+
+config IOMMU_IOVA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for IOVAs"
+ depends on IOMMU_LIMIT_IOVA_ALIGNMENT
+ range 4 9
+ default 9
+ help
+ With this parameter you can specify the maximum PAGE_SIZE order for
+ IOVAs. Larger IOVAs will be aligned only to this specified order.
+ The resulting alignment is PAGE_SIZE shifted left by this order.
+
+ If unsure, leave the default value "9".
+endif
+
menuconfig IOMMU_SUPPORT
bool "IOMMU Hardware Support"
depends on MMU
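As a quick reading aid for the "range 4 9" and "default 9" values above, a
small hedged sketch (assuming 4 KiB pages; PAGE_SIZE is
architecture-dependent) of the alignment cap in bytes that each permitted
order selects:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned int order;

	/* The cap in bytes is PAGE_SIZE << order for each value the
	 * Kconfig range allows. */
	for (order = 4; order <= 9; order++)
		printf("order %u -> %lu KiB\n", order, (PAGE_SIZE << order) >> 10);
	return 0;
}

So the strictest permitted cap (order 4) limits IOVA alignment to 64 KiB,
while the default (order 9) allows alignment up to 2 MiB (2048 KiB in the
output).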
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e95de2c9103..9c82d4eeebe7 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -178,6 +178,24 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
rb_insert_color(&iova->node, root);
}

+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+ unsigned long shift)
+{
+ unsigned long max_align_shift;
+
+ max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
+ - iova_shift(iovad);
+ return min_t(unsigned long, max_align_shift, shift);
+}
+#else
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+ unsigned long shift)
+{
+ return shift;
+}
+#endif
+
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long size, unsigned long limit_pfn,
struct iova *new, bool size_aligned)
@@ -189,7 +207,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long align_mask = ~0UL;

if (size_aligned)
- align_mask <<= fls_long(size - 1);
+ align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -242,7 +260,7 @@ static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
unsigned long gap, candidate_gap = ~0UL;

if (size_aligned)
- align_mask <<= limit_align(iovad, fls_long(size - 1));
+ align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
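For context on what the capped shift feeds into: in this kernel,
__alloc_and_insert_iova_range() picks candidate pfns with roughly
new_pfn = (limit_pfn - size) & align_mask, so the number of low bits cleared
in align_mask determines how many pages each rounding step can discard. The
sketch below uses made-up values (the 4 GiB space, 4 KiB pages, and 65 MiB
request are assumptions, as before). Note also that limit_align_shift()
subtracts iova_shift(iovad), which keeps the configured cap correct when the
IOVA granule is larger than PAGE_SIZE.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only: a 4 GiB IOVA space in 4 KiB pages
	 * and a 65 MiB request (16640 pages). */
	unsigned long limit_pfn = 1UL << 20;	/* top of a 32-bit space */
	unsigned long size = 16640;
	unsigned long full_mask = ~0UL << 15;	/* uncapped: 128 MiB alignment */
	unsigned long capped_mask = ~0UL << 9;	/* capped: 2 MiB alignment */

	/* Candidate start pfn, as computed in __alloc_and_insert_iova_range() */
	unsigned long full = (limit_pfn - size) & full_mask;
	unsigned long capped = (limit_pfn - size) & capped_mask;

	printf("uncapped start pfn %#lx, %lu pages lost to rounding\n",
	       full, (limit_pfn - size) - full);
	printf("capped start pfn   %#lx, %lu pages lost to rounding\n",
	       capped, (limit_pfn - size) - capped);
	return 0;
}

Here the uncapped mask discards 16128 pages (63 MiB) just below limit_pfn,
while the capped mask discards only 256 pages (1 MiB), which is the
space-utilization win the commit message claims.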