From 4db2ce0199f04b6e99999f22e28ef9a0ae5f0d2f Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 14 Sep 2005 21:47:01 -0700
Subject: [LIB]: Consolidate _atomic_dec_and_lock()

Several implementations were essentially a common piece of C code using
the cmpxchg() macro.  Put the implementation in one spot that everyone
can share, and convert sparc64 over to using this.

Alpha is the lone arch-specific implementation, which codes up a
special fast path for the common case in order to avoid GP reloading
which a pure C version would require.

Signed-off-by: David S. Miller
---
 arch/ia64/lib/Makefile       |  1 -
 arch/ia64/lib/dec_and_lock.c | 42 ------------------------------------------
 2 files changed, 43 deletions(-)
 delete mode 100644 arch/ia64/lib/dec_and_lock.c
(limited to 'arch/ia64/lib')

diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 799407e7726..cb1af597370 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON) += carta_random.o
 lib-$(CONFIG_MD_RAID5) += xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o =
 AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/lib/dec_and_lock.c b/arch/ia64/lib/dec_and_lock.c
deleted file mode 100644
index c7ce92f968f..00000000000
--- a/arch/ia64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *	Cleaned up by David Mosberger-Tang
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-	int old, new;
-
-	do {
-		old = atomic_read(refcount);
-		new = old - 1;
-
-		if (unlikely (old == 1)) {
-			/* oops, we may be decrementing to zero, do it the slow way... */
-			spin_lock(lock);
-			if (atomic_dec_and_test(refcount))
-				return 1;
-			spin_unlock(lock);
-			return 0;
-		}
-	} while (cmpxchg(&refcount->counter, old, new) != old);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
--
cgit v1.2.3
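
For context, the consolidated version this commit describes is the same
cmpxchg() loop moved into generic code.  The sketch below is adapted from
the ia64 file deleted above; the lib/dec_and_lock.c that actually landed
may differ in naming and detail.  The fast path decrements with cmpxchg()
while the count stays above 1; only a possible 1 -> 0 transition takes the
spinlock, so the count can never reach zero without the lock held:

#include <linux/compiler.h>	/* unlikely() */
#include <linux/module.h>	/* EXPORT_SYMBOL */
#include <linux/spinlock.h>
#include <asm/atomic.h>

int _atomic_dec_and_lock(atomic_t *refcount, spinlock_t *lock)
{
	int old, new;

	do {
		old = atomic_read(refcount);
		new = old - 1;

		if (unlikely(old == 1)) {
			/* May drop to zero: take the lock before deciding. */
			spin_lock(lock);
			if (atomic_dec_and_test(refcount))
				return 1;	/* hit zero; return with lock held */
			spin_unlock(lock);
			return 0;	/* raced with a new reference */
		}
	} while (cmpxchg(&refcount->counter, old, new) != old);

	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);

Because this is plain C over cmpxchg(), any arch providing that primitive
can share it; only Alpha keeps a hand-coded version, purely to avoid the
GP reload a C call would force, as the commit message notes.
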
From 281dd25cdc0d6903929b79183816d151ea626341 Mon Sep 17 00:00:00 2001
From: Yasunori Goto
Date: Wed, 19 Oct 2005 15:52:18 -0700
Subject: [PATCH] swiotlb: make sure initial DMA allocations really are in DMA memory

This introduces a limit parameter to the core bootmem allocator; the new
parameter indicates that physical memory allocated by the bootmem
allocator should be within the requested limit.

We also introduce the alloc_bootmem_low_pages_limit,
alloc_bootmem_node_limit, and alloc_bootmem_low_pages_node_limit APIs,
but alloc_bootmem_low_pages_limit is the only one used for swiotlb.

The existing alloc_bootmem_low_pages() API could instead have been
changed to pass the right limit to the core allocator, but that would
make the patch more intrusive for 2.6.14, as other arches use
alloc_bootmem_low_pages().  We may do that post-2.6.14 as a cleanup.

With this, swiotlb gets memory within 4GB for both the x86_64 and ia64
arches.

Signed-off-by: Yasunori Goto
Cc: Ravikiran G Thirumalai
Signed-off-by: Linus Torvalds
---
 arch/ia64/lib/swiotlb.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/ia64/lib')

diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e449c..a604efc7f6c 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -123,8 +123,8 @@ swiotlb_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
+					(1 << IO_TLB_SHIFT), 0x100000000);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
--
cgit v1.2.3
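
The hunk above pins the IO TLB below 4GB: the new second argument is a
physical address limit in bytes, and 0x100000000 is the 4GB boundary
(fine as a plain constant on these 64-bit arches).  A minimal usage
sketch follows, assuming the limit is an upper bound on the allocation's
physical placement as the commit message describes; alloc_dma32_buffer()
is a hypothetical helper, not kernel code:

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical helper: bootmem pages that 32-bit DMA masks can reach. */
static void * __init alloc_dma32_buffer(unsigned long bytes)
{
	/* Page-aligned low memory, constrained to lie below 4GB. */
	void *buf = alloc_bootmem_low_pages_limit(bytes, 0x100000000);

	if (!buf)
		panic("cannot allocate %lu bytes below 4GB", bytes);
	return buf;
}

Callers that can tolerate any physical address keep using the unchanged
alloc_bootmem_low_pages(), which is why the patch stays small for 2.6.14.
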