From 466298c65678e7a2bd7bd268fd3776de73e96a4b Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Sat, 11 Jun 2022 01:25:12 -0700 Subject: swiotlb: remove a useless return in swiotlb_init Both swiotlb_init_remap() and swiotlb_init() have return type void. Signed-off-by: Dongli Zhang Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index cb50f8d38360..fd21f4162f4b 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -282,7 +282,7 @@ retry: void __init swiotlb_init(bool addressing_limit, unsigned int flags) { - return swiotlb_init_remap(addressing_limit, flags, NULL); + swiotlb_init_remap(addressing_limit, flags, NULL); } /* -- cgit v1.2.3 From 0bf28fc40d89b1a3e00d1b79473bad4e9ca20ad1 Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Sat, 11 Jun 2022 01:25:14 -0700 Subject: swiotlb: panic if nslabs is too small Panic on purpose if nslabs is too small, in order to sync with the remap retry logic. In addition, print the number of bytes for tlb alloc failure. Signed-off-by: Dongli Zhang Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index fd21f4162f4b..1758b724c7a8 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -242,6 +242,9 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, if (swiotlb_force_disable) return; + if (nslabs < IO_TLB_MIN_SLABS) + panic("%s: nslabs = %lu too small\n", __func__, nslabs); + /* * By default allocate the bounce buffer memory from low memory, but * allow to pick a location everywhere for hypervisors with guest @@ -254,7 +257,8 @@ retry: else tlb = memblock_alloc_low(bytes, PAGE_SIZE); if (!tlb) { - pr_warn("%s: failed to allocate tlb structure\n", __func__); + pr_warn("%s: Failed to allocate %zu bytes tlb structure\n", + __func__, bytes); return; } -- cgit v1.2.3 From c51ba246cb172c9e947dc6fb8868a1eaf0b2a913 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 12 Jul 2022 08:46:45 +0200 Subject: swiotlb: fail map correctly with failed io_tlb_default_mem In the failure case of trying to use a buffer which we'd previously failed to allocate, the "!mem" condition is no longer sufficient since io_tlb_default_mem became static and assigned by default. Update the condition to work as intended per the rest of that conversion. Fixes: 463e862ac63e ("swiotlb: Convert io_default_tlb_mem to static allocation") Signed-off-by: Robin Murphy Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 1758b724c7a8..909b43445574 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -584,7 +584,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, int index; phys_addr_t tlb_addr; - if (!mem) + if (!mem || !mem->nslabs) panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) -- cgit v1.2.3 From 20347fca71a387a3751f7bb270062616ddc5317a Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Fri, 8 Jul 2022 12:15:44 -0400 Subject: swiotlb: split up the global swiotlb lock Traditionally swiotlb was not performance critical because it was only used for slow devices. 
But in some setups, like TDX/SEV confidential guests, all IO has to go through swiotlb. Currently swiotlb only has a single lock. Under high IO load with multiple CPUs this can lead to significant lock contention on the swiotlb lock. This patch splits the swiotlb bounce buffer pool into individual areas which have their own lock. Each CPU tries to allocate in its own area first. Only if that fails does it search other areas. On freeing, the allocation is returned to the area from which the memory was originally allocated. The number of areas can be set via the swiotlb kernel parameter and defaults to the number of possible CPUs; if that count is not a power of 2, it is rounded up to the next power of 2. This idea comes from a patch by Andi Kleen (https://github.com/intel/tdx/commit/4529b5784c141782c72ec9bd9a92df2b68cb7d45). Based-on-idea-by: Andi Kleen Signed-off-by: Tianyu Lan Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 229 ++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 189 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 909b43445574..dcf1459ce723 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -70,6 +70,43 @@ struct io_tlb_mem io_tlb_default_mem; phys_addr_t swiotlb_unencrypted_base; static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT; +static unsigned long default_nareas; + +/** + * struct io_tlb_area - IO TLB memory area descriptor + * + * This is a single area with a single lock. + * + * @used: The number of used IO TLB block. + * @index: The slot index to start searching in this area for next round. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + */ +struct io_tlb_area { + unsigned long used; + unsigned int index; + spinlock_t lock; +}; + +static void swiotlb_adjust_nareas(unsigned int nareas) +{ + if (!is_power_of_2(nareas)) + nareas = roundup_pow_of_two(nareas); + + default_nareas = nareas; + + pr_info("area num %d.\n", nareas); + /* + * Round up number of slabs to the next power of 2. + * The last area is going be smaller than the rest if + * default_nslabs is not power of two. + */ + if (nareas && !is_power_of_2(default_nslabs)) { + default_nslabs = roundup_pow_of_two(default_nslabs); + pr_info("SWIOTLB bounce buffer size roundup to %luMB", + (default_nslabs << IO_TLB_SHIFT) >> 20); + } +} static int __init setup_io_tlb_npages(char *str) @@ -79,6 +116,10 @@ setup_io_tlb_npages(char *str) default_nslabs = ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE); } + if (*str == ',') + ++str; + if (isdigit(*str)) + swiotlb_adjust_nareas(simple_strtoul(str, &str, 0)); if (*str == ',') ++str; if (!strcmp(str, "force")) @@ -112,8 +153,19 @@ void __init swiotlb_adjust_size(unsigned long size) */ if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT) return; + + /* + * Round up number of slabs to the next power of 2. + * The last area is going be smaller than the rest if + * default_nslabs is not power of two.
+ */ size = ALIGN(size, IO_TLB_SIZE); default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + if (default_nareas) { + default_nslabs = roundup_pow_of_two(default_nslabs); + size = default_nslabs << IO_TLB_SHIFT; + } + pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); } @@ -192,7 +244,8 @@ void __init swiotlb_update_mem_attributes(void) } static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, - unsigned long nslabs, unsigned int flags, bool late_alloc) + unsigned long nslabs, unsigned int flags, + bool late_alloc, unsigned int nareas) { void *vaddr = phys_to_virt(start); unsigned long bytes = nslabs << IO_TLB_SHIFT, i; @@ -202,10 +255,17 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->end = mem->start + bytes; mem->index = 0; mem->late_alloc = late_alloc; + mem->nareas = nareas; + mem->area_nslabs = nslabs / mem->nareas; mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); spin_lock_init(&mem->lock); + for (i = 0; i < mem->nareas; i++) { + spin_lock_init(&mem->areas[i].lock); + mem->areas[i].index = 0; + } + for (i = 0; i < mem->nslabs; i++) { mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); mem->slots[i].orig_addr = INVALID_PHYS_ADDR; @@ -232,7 +292,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, int (*remap)(void *tlb, unsigned long nslabs)) { struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long nslabs = default_nslabs; + unsigned long nslabs; size_t alloc_size; size_t bytes; void *tlb; @@ -242,6 +302,14 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, if (swiotlb_force_disable) return; + /* + * default_nslabs maybe changed when adjust area number. + * So allocate bounce buffer after adjusting area number. 
+ */ + if (!default_nareas) + swiotlb_adjust_nareas(num_possible_cpus()); + + nslabs = default_nslabs; if (nslabs < IO_TLB_MIN_SLABS) panic("%s: nslabs = %lu too small\n", __func__, nslabs); @@ -278,7 +346,13 @@ retry: panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false); + mem->areas = memblock_alloc(sizeof(struct io_tlb_area) * + default_nareas, SMP_CACHE_BYTES); + if (!mem->areas) + panic("%s: Failed to allocate mem->areas.\n", __func__); + + swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false, + default_nareas); if (flags & SWIOTLB_VERBOSE) swiotlb_print_info(); @@ -300,7 +374,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask, struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); unsigned char *vstart = NULL; - unsigned int order; + unsigned int order, area_order; bool retried = false; int rc = 0; @@ -341,19 +415,34 @@ retry: (PAGE_SIZE << order) >> 20); } + if (!default_nareas) + swiotlb_adjust_nareas(num_possible_cpus()); + + area_order = get_order(array_size(sizeof(*mem->areas), + default_nareas)); + mem->areas = (struct io_tlb_area *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order); + if (!mem->areas) + goto error_area; + mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(array_size(sizeof(*mem->slots), nslabs))); - if (!mem->slots) { - free_pages((unsigned long)vstart, order); - return -ENOMEM; - } + if (!mem->slots) + goto error_slots; set_memory_decrypted((unsigned long)vstart, (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true); + swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true, + default_nareas); swiotlb_print_info(); return 0; + +error_slots: + free_pages((unsigned long)mem->areas, area_order); +error_area: + free_pages((unsigned long)vstart, order); + return -ENOMEM; } void __init swiotlb_exit(void) @@ -361,6 +450,7 @@ void __init swiotlb_exit(void) struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long tbl_vaddr; size_t tbl_size, slots_size; + unsigned int area_order; if (swiotlb_force_bounce) return; @@ -375,9 +465,14 @@ void __init swiotlb_exit(void) set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT); if (mem->late_alloc) { + area_order = get_order(array_size(sizeof(*mem->areas), + mem->nareas)); + free_pages((unsigned long)mem->areas, area_order); free_pages(tbl_vaddr, get_order(tbl_size)); free_pages((unsigned long)mem->slots, get_order(slots_size)); } else { + memblock_free_late(__pa(mem->areas), + mem->nareas * sizeof(struct io_tlb_area)); memblock_free_late(mem->start, tbl_size); memblock_free_late(__pa(mem->slots), slots_size); } @@ -480,9 +575,9 @@ static inline unsigned long get_max_slots(unsigned long boundary_mask) return nr_slots(boundary_mask + 1); } -static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) +static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index) { - if (index >= mem->nslabs) + if (index >= mem->area_nslabs) return 0; return index; } @@ -491,10 +586,11 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) * Find a suitable number of IO TLB entries size that will fit this request and * allocate a buffer from that IO TLB pool. 
*/ -static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, - size_t alloc_size, unsigned int alloc_align_mask) +static int swiotlb_do_find_slots(struct io_tlb_mem *mem, + struct io_tlb_area *area, int area_index, + struct device *dev, phys_addr_t orig_addr, + size_t alloc_size, unsigned int alloc_align_mask) { - struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; @@ -505,8 +601,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, unsigned int index, wrap, count = 0, i; unsigned int offset = swiotlb_align_offset(dev, orig_addr); unsigned long flags; + unsigned int slot_base; + unsigned int slot_index; BUG_ON(!nslots); + BUG_ON(area_index >= mem->nareas); /* * For mappings with an alignment requirement don't bother looping to @@ -518,16 +617,20 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1); - spin_lock_irqsave(&mem->lock, flags); - if (unlikely(nslots > mem->nslabs - mem->used)) + spin_lock_irqsave(&area->lock, flags); + if (unlikely(nslots > mem->area_nslabs - area->used)) goto not_found; - index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); + slot_base = area_index * mem->area_nslabs; + index = wrap = wrap_area_index(mem, ALIGN(area->index, stride)); + do { + slot_index = slot_base + index; + if (orig_addr && - (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != - (orig_addr & iotlb_align_mask)) { - index = wrap_index(mem, index + 1); + (slot_addr(tbl_dma_addr, slot_index) & + iotlb_align_mask) != (orig_addr & iotlb_align_mask)) { + index = wrap_area_index(mem, index + 1); continue; } @@ -536,26 +639,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, * contiguous buffers, we allocate the buffers from that slot * and mark the entries as '0' indicating unavailable. */ - if (!iommu_is_span_boundary(index, nslots, + if (!iommu_is_span_boundary(slot_index, nslots, nr_slots(tbl_dma_addr), max_slots)) { - if (mem->slots[index].list >= nslots) + if (mem->slots[slot_index].list >= nslots) goto found; } - index = wrap_index(mem, index + stride); + index = wrap_area_index(mem, index + stride); } while (index != wrap); not_found: - spin_unlock_irqrestore(&mem->lock, flags); + spin_unlock_irqrestore(&area->lock, flags); return -1; found: - for (i = index; i < index + nslots; i++) { + for (i = slot_index; i < slot_index + nslots; i++) { mem->slots[i].list = 0; - mem->slots[i].alloc_size = - alloc_size - (offset + ((i - index) << IO_TLB_SHIFT)); + mem->slots[i].alloc_size = alloc_size - (offset + + ((i - slot_index) << IO_TLB_SHIFT)); } - for (i = index - 1; + for (i = slot_index - 1; io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; i--) mem->slots[i].list = ++count; @@ -563,14 +666,43 @@ found: /* * Update the indices to avoid searching in the next round. 
*/ - if (index + nslots < mem->nslabs) - mem->index = index + nslots; + if (index + nslots < mem->area_nslabs) + area->index = index + nslots; else - mem->index = 0; - mem->used += nslots; + area->index = 0; + area->used += nslots; + spin_unlock_irqrestore(&area->lock, flags); + return slot_index; +} - spin_unlock_irqrestore(&mem->lock, flags); - return index; +static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, + size_t alloc_size, unsigned int alloc_align_mask) +{ + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + int start = raw_smp_processor_id() & ((1U << __fls(mem->nareas)) - 1); + int i = start, index; + + do { + index = swiotlb_do_find_slots(mem, mem->areas + i, i, + dev, orig_addr, alloc_size, + alloc_align_mask); + if (index >= 0) + return index; + if (++i >= mem->nareas) + i = 0; + } while (i != start); + + return -1; +} + +static unsigned long mem_used(struct io_tlb_mem *mem) +{ + int i; + unsigned long used = 0; + + for (i = 0; i < mem->nareas; i++) + used += mem->areas[i].used; + return used; } phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, @@ -602,7 +734,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - alloc_size, mem->nslabs, mem->used); + alloc_size, mem->nslabs, mem_used(mem)); return (phys_addr_t)DMA_MAPPING_ERROR; } @@ -632,6 +764,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) unsigned int offset = swiotlb_align_offset(dev, tlb_addr); int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; int nslots = nr_slots(mem->slots[index].alloc_size + offset); + int aindex = index / mem->area_nslabs; + struct io_tlb_area *area = &mem->areas[aindex]; int count, i; /* @@ -640,7 +774,9 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. 
*/ - spin_lock_irqsave(&mem->lock, flags); + BUG_ON(aindex >= mem->nareas); + + spin_lock_irqsave(&area->lock, flags); if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) count = mem->slots[index + nslots].list; else @@ -664,8 +800,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; i--) mem->slots[i].list = ++count; - mem->used -= nslots; - spin_unlock_irqrestore(&mem->lock, flags); + area->used -= nslots; + spin_unlock_irqrestore(&area->lock, flags); } /* @@ -763,12 +899,14 @@ EXPORT_SYMBOL_GPL(is_swiotlb_active); static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, const char *dirname) { + unsigned long used = mem_used(mem); + mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); if (!mem->nslabs) return; debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); - debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); + debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &used); } static int __init __maybe_unused swiotlb_create_default_debugfs(void) @@ -819,6 +957,9 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, struct io_tlb_mem *mem = rmem->priv; unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; + /* Set Per-device io tlb area to one */ + unsigned int nareas = 1; + /* * Since multiple devices can share the same pool, the private data, * io_tlb_mem struct, will be initialized by the first device attached @@ -835,10 +976,18 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, return -ENOMEM; } + mem->areas = kcalloc(nareas, sizeof(*mem->areas), + GFP_KERNEL); + if (!mem->areas) { + kfree(mem); + kfree(mem->slots); + return -ENOMEM; + } + set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), rmem->size >> PAGE_SHIFT); swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE, - false); + false, nareas); mem->for_alloc = true; rmem->priv = mem; -- cgit v1.2.3 From 4a97739474c402e0a14cf6a432f1920262f6811c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 15 Jul 2022 11:19:50 +0300 Subject: swiotlb: fix use after free on error handling path Don't dereference "mem" after it has been freed. Flip the two kfree()s around to address this bug. Fixes: 26ffb91fa5e0 ("swiotlb: split up the global swiotlb lock") Signed-off-by: Dan Carpenter Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index dcf1459ce723..c50e6fe20f37 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -979,8 +979,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, mem->areas = kcalloc(nareas, sizeof(*mem->areas), GFP_KERNEL); if (!mem->areas) { - kfree(mem); kfree(mem->slots); + kfree(mem); return -ENOMEM; } -- cgit v1.2.3 From 91561d4ecb755f056f8ff04f9dcaec210140e55c Mon Sep 17 00:00:00 2001 From: Chao Gao Date: Fri, 15 Jul 2022 18:45:33 +0800 Subject: swiotlb: remove unused fields in io_tlb_mem Commit 20347fca71a3 ("swiotlb: split up the global swiotlb lock") splits io_tlb_mem into multiple areas. Each area has its own lock and index. The global ones are not used so remove them. 
Signed-off-by: Chao Gao Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index c50e6fe20f37..cbffa0b1ace5 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -253,14 +253,12 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->nslabs = nslabs; mem->start = start; mem->end = mem->start + bytes; - mem->index = 0; mem->late_alloc = late_alloc; mem->nareas = nareas; mem->area_nslabs = nslabs / mem->nareas; mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); - spin_lock_init(&mem->lock); for (i = 0; i < mem->nareas; i++) { spin_lock_init(&mem->areas[i].lock); mem->areas[i].index = 0; -- cgit v1.2.3 From 44335487bab05e06902f9184179857aae764bfe6 Mon Sep 17 00:00:00 2001 From: Chao Gao Date: Fri, 15 Jul 2022 18:45:34 +0800 Subject: swiotlb: consolidate rounding up default_nslabs default_nslabs are rounded up in two cases with exactly same comments. Add a simple wrapper to reduce duplicate code/comments. It is preparatory to adding more logics into the round-up. No functional change intended. Signed-off-by: Chao Gao Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index cbffa0b1ace5..604f2469ac0e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -88,6 +88,20 @@ struct io_tlb_area { spinlock_t lock; }; +/* + * Round up number of slabs to the next power of 2. The last area is going + * be smaller than the rest if default_nslabs is not power of two. + * + * Return true if default_nslabs is rounded up. + */ +static bool round_up_default_nslabs(void) +{ + if (!default_nareas || is_power_of_2(default_nslabs)) + return false; + default_nslabs = roundup_pow_of_two(default_nslabs); + return true; +} + static void swiotlb_adjust_nareas(unsigned int nareas) { if (!is_power_of_2(nareas)) @@ -96,16 +110,9 @@ static void swiotlb_adjust_nareas(unsigned int nareas) default_nareas = nareas; pr_info("area num %d.\n", nareas); - /* - * Round up number of slabs to the next power of 2. - * The last area is going be smaller than the rest if - * default_nslabs is not power of two. - */ - if (nareas && !is_power_of_2(default_nslabs)) { - default_nslabs = roundup_pow_of_two(default_nslabs); + if (round_up_default_nslabs()) pr_info("SWIOTLB bounce buffer size roundup to %luMB", (default_nslabs << IO_TLB_SHIFT) >> 20); - } } static int __init @@ -154,18 +161,10 @@ void __init swiotlb_adjust_size(unsigned long size) if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT) return; - /* - * Round up number of slabs to the next power of 2. - * The last area is going be smaller than the rest if - * default_nslabs is not power of two. - */ size = ALIGN(size, IO_TLB_SIZE); default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); - if (default_nareas) { - default_nslabs = roundup_pow_of_two(default_nslabs); + if (round_up_default_nslabs()) size = default_nslabs << IO_TLB_SHIFT; - } - pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); } -- cgit v1.2.3 From 57e6840cf79a4af84f44af3f8cfeacd8a14a6c6f Mon Sep 17 00:00:00 2001 From: Chao Gao Date: Fri, 15 Jul 2022 18:45:35 +0800 Subject: swiotlb: ensure a segment doesn't cross the area boundary Free slots tracking assumes that slots in a segment can be allocated to fulfill a request. 
This implies that slots in a segment should belong to the same area. Although the possibility of a violation is low, it is better to explicitly enforce segments won't span multiple areas by adjusting the number of slabs when configuring areas. Signed-off-by: Chao Gao Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 604f2469ac0e..608923e8dab1 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -91,12 +91,21 @@ struct io_tlb_area { /* * Round up number of slabs to the next power of 2. The last area is going * be smaller than the rest if default_nslabs is not power of two. + * The number of slot in an area should be a multiple of IO_TLB_SEGSIZE, + * otherwise a segment may span two or more areas. It conflicts with free + * contiguous slots tracking: free slots are treated contiguous no matter + * whether they cross an area boundary. * * Return true if default_nslabs is rounded up. */ static bool round_up_default_nslabs(void) { - if (!default_nareas || is_power_of_2(default_nslabs)) + if (!default_nareas) + return false; + + if (default_nslabs < IO_TLB_SEGSIZE * default_nareas) + default_nslabs = IO_TLB_SEGSIZE * default_nareas; + else if (is_power_of_2(default_nslabs)) return false; default_nslabs = roundup_pow_of_two(default_nslabs); return true; -- cgit v1.2.3 From 942a8186eb4451700dadd1d60a2e43ce67605991 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Jul 2022 08:43:07 +0200 Subject: swiotlb: move struct io_tlb_slot to swiotlb.c No need to expose this structure definition in the header. Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 608923e8dab1..39dee4004439 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -62,6 +62,12 @@ #define INVALID_PHYS_ADDR (~(phys_addr_t)0) +struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; +}; + static bool swiotlb_force_bounce; static bool swiotlb_force_disable; -- cgit v1.2.3 From a229cc14f3395311b899e5e582b71efa8dd01df0 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 14 Jul 2022 19:15:24 +0800 Subject: dma-mapping: add dma_opt_mapping_size() Streaming DMA mapping involving an IOMMU may be much slower for larger total mapping size. This is because every IOMMU DMA mapping requires an IOVA to be allocated and freed. IOVA sizes above a certain limit are not cached, which can have a big impact on DMA mapping performance. Provide an API for device drivers to know this "optimal" limit, such that they may try to produce mapping which don't exceed it. Signed-off-by: John Garry Reviewed-by: Damien Le Moal Acked-by: Martin K. 
Petersen Signed-off-by: Christoph Hellwig --- kernel/dma/mapping.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index db7244291b74..1bfe11b1edb6 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -773,6 +773,18 @@ size_t dma_max_mapping_size(struct device *dev) } EXPORT_SYMBOL_GPL(dma_max_mapping_size); +size_t dma_opt_mapping_size(struct device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + size_t size = SIZE_MAX; + + if (ops && ops->opt_mapping_size) + size = ops->opt_mapping_size(); + + return min(dma_max_mapping_size(dev), size); +} +EXPORT_SYMBOL_GPL(dma_opt_mapping_size); + bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { const struct dma_map_ops *ops = get_dma_ops(dev); -- cgit v1.2.3 From 72311809031217714e635b24f8478e6ecb0d93d9 Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Thu, 21 Jul 2022 23:38:46 -0400 Subject: swiotlb: clean up some coding style and minor issues - Fix the used field of struct io_tlb_area wasn't initialized - Set area number to be 0 if input area number parameter is 0 - Use array_size() to calculate io_tlb_area array size - Make parameters of swiotlb_do_find_slots() more reasonable Fixes: 26ffb91fa5e0 ("swiotlb: split up the global swiotlb lock") Signed-off-by: Tianyu Lan Reviewed-by: Michael Kelley Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 39dee4004439..cc50f1fb127f 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -119,7 +119,10 @@ static bool round_up_default_nslabs(void) static void swiotlb_adjust_nareas(unsigned int nareas) { - if (!is_power_of_2(nareas)) + /* use a single area when non is specified */ + if (!nareas) + nareas = 1; + else if (!is_power_of_2(nareas)) nareas = roundup_pow_of_two(nareas); default_nareas = nareas; @@ -276,6 +279,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, for (i = 0; i < mem->nareas; i++) { spin_lock_init(&mem->areas[i].lock); mem->areas[i].index = 0; + mem->areas[i].used = 0; } for (i = 0; i < mem->nslabs; i++) { @@ -358,8 +362,8 @@ retry: panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - mem->areas = memblock_alloc(sizeof(struct io_tlb_area) * - default_nareas, SMP_CACHE_BYTES); + mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), + default_nareas), SMP_CACHE_BYTES); if (!mem->areas) panic("%s: Failed to allocate mem->areas.\n", __func__); @@ -484,7 +488,7 @@ void __init swiotlb_exit(void) free_pages((unsigned long)mem->slots, get_order(slots_size)); } else { memblock_free_late(__pa(mem->areas), - mem->nareas * sizeof(struct io_tlb_area)); + array_size(sizeof(*mem->areas), mem->nareas)); memblock_free_late(mem->start, tbl_size); memblock_free_late(__pa(mem->slots), slots_size); } @@ -598,11 +602,12 @@ static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index) * Find a suitable number of IO TLB entries size that will fit this request and * allocate a buffer from that IO TLB pool. 
*/ -static int swiotlb_do_find_slots(struct io_tlb_mem *mem, - struct io_tlb_area *area, int area_index, - struct device *dev, phys_addr_t orig_addr, - size_t alloc_size, unsigned int alloc_align_mask) +static int swiotlb_do_find_slots(struct device *dev, int area_index, + phys_addr_t orig_addr, size_t alloc_size, + unsigned int alloc_align_mask) { + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + struct io_tlb_area *area = mem->areas + area_index; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; @@ -691,12 +696,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, size_t alloc_size, unsigned int alloc_align_mask) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; - int start = raw_smp_processor_id() & ((1U << __fls(mem->nareas)) - 1); + int start = raw_smp_processor_id() & (mem->nareas - 1); int i = start, index; do { - index = swiotlb_do_find_slots(mem, mem->areas + i, i, - dev, orig_addr, alloc_size, + index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size, alloc_align_mask); if (index >= 0) return index; -- cgit v1.2.3 From 7c2645a2a30a45d3dc4c98b315a51be44ec69a67 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:55 -0600 Subject: dma-mapping: allow EREMOTEIO return code for P2PDMA transfers Add EREMOTEIO error return to dma_map_sgtable() which will be used by .map_sg() implementations that detect P2PDMA pages that the underlying DMA device cannot access. Signed-off-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- kernel/dma/mapping.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 1bfe11b1edb6..746d46825d08 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -197,7 +197,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, if (ents > 0) debug_dma_map_sg(dev, sg, nents, ents, dir, attrs); else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM && - ents != -EIO)) + ents != -EIO && ents != -EREMOTEIO)) return -EIO; return ents; @@ -255,6 +255,9 @@ EXPORT_SYMBOL(dma_map_sg_attrs); * complete the mapping. Should succeed if retried later. * -EIO Legacy error code with an unknown meaning. eg. this is * returned if a lower level call returned DMA_MAPPING_ERROR. + * -EREMOTEIO The DMA device cannot access P2PDMA memory specified in + * the sg_table. This will not succeed if retried. + * */ int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs) -- cgit v1.2.3 From f02ad36d4f76645e7e1c21f572260e9a2e61c26b Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:56 -0600 Subject: dma-direct: support PCI P2PDMA pages in dma-direct map_sg Add PCI P2PDMA support for dma_direct_map_sg() so that it can map PCI P2PDMA pages directly without a hack in the callers. This allows for heterogeneous SGLs that contain both P2PDMA and regular pages. A P2PDMA page may have three possible outcomes when being mapped: 1) If the data path between the two devices doesn't go through the root port, then it should be mapped with a PCI bus address 2) If the data path goes through the host bridge, it should be mapped normally, as though it were a CPU physical address 3) It is not possible for the two devices to communicate and thus the mapping operation should fail (and it will return -EREMOTEIO). 
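The three outcomes map naturally onto a switch over the mapping type, as the dma_direct_map_sg() hunk further down shows. The following standalone sketch (plain C, compilable outside the kernel) restates them; the enum mirrors the kernel's pci_p2pdma_map_type outcomes in spirit, but the DEMO_* names and scaffolding are assumptions for illustration:

        #include <stdio.h>

        /* Assumed stand-ins for the kernel's pci_p2pdma_map_type outcomes. */
        enum demo_map_type {
                DEMO_MAP_BUS_ADDR,              /* path avoids the root port */
                DEMO_MAP_THRU_HOST_BRIDGE,      /* path crosses the host bridge */
                DEMO_MAP_NOT_SUPPORTED,         /* peers cannot communicate */
        };

        static const char *demo_outcome(enum demo_map_type t)
        {
                switch (t) {
                case DEMO_MAP_BUS_ADDR:
                        return "use the PCI bus address directly";
                case DEMO_MAP_THRU_HOST_BRIDGE:
                        return "map normally, as a CPU physical address";
                default:
                        return "fail the mapping with -EREMOTEIO";
                }
        }

        int main(void)
        {
                for (int t = DEMO_MAP_BUS_ADDR; t <= DEMO_MAP_NOT_SUPPORTED; t++)
                        printf("outcome %d: %s\n", t, demo_outcome(t));
                return 0;
        }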
SGL segments that contain PCI bus addresses are marked with sg_dma_mark_pci_p2pdma() and are ignored when unmapped. P2PDMA mappings are also failed if swiotlb needs to be used on the mapping. Signed-off-by: Logan Gunthorpe Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- kernel/dma/direct.c | 43 +++++++++++++++++++++++++++++++++++++------ kernel/dma/direct.h | 8 +++++++- 2 files changed, 44 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index e978f36e6be8..133a4be2d3e5 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -454,29 +454,60 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu_all(); } +/* + * Unmaps segments, except for ones marked as pci_p2pdma which do not + * require any further action as they contain a bus address. + */ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *sg; int i; - for_each_sg(sgl, sg, nents, i) - dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, - attrs); + for_each_sg(sgl, sg, nents, i) { + if (sg_is_dma_bus_address(sg)) + sg_dma_unmark_bus_address(sg); + else + dma_direct_unmap_page(dev, sg->dma_address, + sg_dma_len(sg), dir, attrs); + } } #endif int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { - int i; + struct pci_p2pdma_map_state p2pdma_state = {}; + enum pci_p2pdma_map_type map; struct scatterlist *sg; + int i, ret; for_each_sg(sgl, sg, nents, i) { + if (is_pci_p2pdma_page(sg_page(sg))) { + map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg); + switch (map) { + case PCI_P2PDMA_MAP_BUS_ADDR: + continue; + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + /* + * Any P2P mapping that traverses the PCI + * host bridge must be mapped with CPU physical + * address and not PCI bus addresses. This is + * done with dma_direct_map_page() below. 
+ */ + break; + default: + ret = -EREMOTEIO; + goto out_unmap; + } + } + sg->dma_address = dma_direct_map_page(dev, sg_page(sg), sg->offset, sg->length, dir, attrs); - if (sg->dma_address == DMA_MAPPING_ERROR) + if (sg->dma_address == DMA_MAPPING_ERROR) { + ret = -EIO; goto out_unmap; + } sg_dma_len(sg) = sg->length; } @@ -484,7 +515,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, out_unmap: dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); - return -EIO; + return ret; } dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index a78c0ba70645..e38ffc5e6bdd 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -8,6 +8,7 @@ #define _KERNEL_DMA_DIRECT_H #include +#include int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, @@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dma_addr = phys_to_dma(dev, phys); - if (is_swiotlb_force_bounce(dev)) + if (is_swiotlb_force_bounce(dev)) { + if (is_pci_p2pdma_page(page)) + return DMA_MAPPING_ERROR; return swiotlb_map(dev, phys, size, dir, attrs); + } if (unlikely(!dma_capable(dev, dma_addr, size, true))) { + if (is_pci_p2pdma_page(page)) + return DMA_MAPPING_ERROR; if (is_swiotlb_active(dev)) return swiotlb_map(dev, phys, size, dir, attrs); -- cgit v1.2.3 From 159bf19270e80b5bc4b13aa88072dcb390b4d297 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 8 Jul 2022 10:50:57 -0600 Subject: dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support Add a flags member to the dma_map_ops structure with one flag to indicate support for PCI P2PDMA. Also, add a helper to check if a device supports PCI P2PDMA. Signed-off-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Christoph Hellwig Signed-off-by: Christoph Hellwig --- kernel/dma/mapping.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 746d46825d08..a9ce5d728231 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -723,6 +723,24 @@ int dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_supported); +bool dma_pci_p2pdma_supported(struct device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + /* if ops is not set, dma direct will be used which supports P2PDMA */ + if (!ops) + return true; + + /* + * Note: dma_ops_bypass is not checked here because P2PDMA should + * not be used with dma mapping ops that do not have support even + * if the specific device is bypassing them. + */ + + return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED; +} +EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported); + #ifdef CONFIG_ARCH_HAS_DMA_SET_MASK void arch_dma_set_mask(struct device *dev, u64 mask); #else -- cgit v1.2.3 From 8419702489f3be7f9e4fcf12c04d9d3f00114d35 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 27 Jul 2022 13:15:22 -0600 Subject: dma-mapping: reformat comment to suppress htmldoc warning make html doc reports a cryptic warning with the commit named below: kernel/dma/mapping.c:258: WARNING: Option list ends without a blank line; unexpected unindent. Seems the parser is a bit fussy about the tabbing and having a single space tab causes the warning. To suppress the warning add another tab to the list and reindent everything. 
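Since the comment being reformatted documents the dma_map_sgtable() error contract, a short consumer-side sketch may help. The driver context and the demo_map_sgtable() helper are hypothetical; only dma_map_sgtable(), dma_unmap_sgtable() and the error codes come from this series:

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        /* Hypothetical driver helper: map a scatter/gather table that may
         * contain P2PDMA pages and decide whether a retry makes sense. */
        static int demo_map_sgtable(struct device *dev, struct sg_table *sgt)
        {
                int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

                if (!ret)
                        return 0;       /* mapped; undo with dma_unmap_sgtable() */
                if (ret == -ENOMEM)
                        return -EAGAIN; /* transient; retrying later may succeed */
                /* -EREMOTEIO (unreachable P2PDMA memory), -EINVAL and -EIO are
                 * permanent for this request, so give up. */
                return ret;
        }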
Fixes: 7c2645a2a30a ("dma-mapping: allow EREMOTEIO return code for P2PDMA transfers") Signed-off-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig --- kernel/dma/mapping.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index a9ce5d728231..49cbf3e33de7 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -249,15 +249,15 @@ EXPORT_SYMBOL(dma_map_sg_attrs); * Returns 0 on success or a negative error code on error. The following * error codes are supported with the given meaning: * - * -EINVAL An invalid argument, unaligned access or other error - * in usage. Will not succeed if retried. - * -ENOMEM Insufficient resources (like memory or IOVA space) to - * complete the mapping. Should succeed if retried later. - * -EIO Legacy error code with an unknown meaning. eg. this is - * returned if a lower level call returned DMA_MAPPING_ERROR. - * -EREMOTEIO The DMA device cannot access P2PDMA memory specified in - * the sg_table. This will not succeed if retried. - * + * -EINVAL An invalid argument, unaligned access or other error + * in usage. Will not succeed if retried. + * -ENOMEM Insufficient resources (like memory or IOVA space) to + * complete the mapping. Should succeed if retried later. + * -EIO Legacy error code with an unknown meaning. eg. this is + * returned if a lower level call returned + * DMA_MAPPING_ERROR. + * -EREMOTEIO The DMA device cannot access P2PDMA memory specified + * in the sg_table. This will not succeed if retried. */ int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs) -- cgit v1.2.3 From 5c850d31880e00f063fa2a3746ba212c4bcc510f Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Thu, 28 Jul 2022 03:24:20 -0400 Subject: swiotlb: fix passing local variable to debugfs_create_ulong() The debugfs node is read at run time, so a local variable must not be passed to debugfs_create_ulong(). Fix this by creating the io_tlb_used node with debugfs_create_file() and computing the number of used IO TLB slots in the getter of the fops_io_tlb_used attribute. Fixes: 20347fca71a3 ("swiotlb: split up the global swiotlb lock") Signed-off-by: Tianyu Lan Signed-off-by: Christoph Hellwig --- kernel/dma/swiotlb.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index cc50f1fb127f..c5a9190b218f 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -912,17 +912,23 @@ bool is_swiotlb_active(struct device *dev) } EXPORT_SYMBOL_GPL(is_swiotlb_active); +static int io_tlb_used_get(void *data, u64 *val) +{ + *val = mem_used(&io_tlb_default_mem); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n"); + static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, const char *dirname) { - unsigned long used = mem_used(mem); - mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); if (!mem->nslabs) return; debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); - debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &used); + debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL, + &fops_io_tlb_used); } static int __init __maybe_unused swiotlb_create_default_debugfs(void) -- cgit v1.2.3
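To round off the series, a hedged sketch of the debugfs pattern the last fix relies on: a value that changes at run time must be computed in a getter on each read, not snapshotted into a stack variable at registration time. Only the debugfs APIs below are real; the demo_* names, the counter, and the init hook are illustrative assumptions.

        #include <linux/debugfs.h>
        #include <linux/init.h>

        static unsigned long demo_counter;      /* stands in for mem_used() state */

        static int demo_counter_get(void *data, u64 *val)
        {
                *val = demo_counter;            /* evaluated on every read of the node */
                return 0;
        }
        DEFINE_DEBUGFS_ATTRIBUTE(fops_demo_counter, demo_counter_get, NULL, "%llu\n");

        static int __init demo_debugfs_init(void)
        {
                struct dentry *dir = debugfs_create_dir("demo", NULL);

                /* With a getter there is no stale stack address to dereference. */
                debugfs_create_file("counter", 0400, dir, NULL, &fops_demo_counter);
                return 0;
        }
        late_initcall(demo_debugfs_init);

The broken version passed &used, the address of a local variable, to debugfs_create_ulong(); once swiotlb_create_debugfs_files() returned, any read of the node dereferenced a dead stack slot, which the attribute-with-getter pattern avoids.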