aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2011-09-08 17:50:12 -0700
committerJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2011-09-27 23:37:20 -0700
commit4a7f340c6a75ec5fca23d9c80a59f3f28cc4a61e (patch)
tree3680ac96e1a2ab091e7d157b65b30ac15d41f4b9
parent61e2cd0acc248c14793cefd7e23e209be9e0b70d (diff)
downloadlinaro-lsk-4a7f340c6a75ec5fca23d9c80a59f3f28cc4a61e.tar.gz
x86, ticketlock: remove obsolete comment
The note about partial registers is not really relevant now that we rely on gcc to generate all the assembler. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-rw-r--r--arch/x86/include/asm/spinlock.h4
1 file changed, 0 insertions, 4 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index f5695eeb83f..972c260919a 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -49,10 +49,6 @@
* issues and should be optimal for the uncontended case. Note the tail must be
* in the high part, because a wide xadd increment of the low part would carry
* up and contaminate the high part.
- *
- * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
- * save some instructions and make the code more elegant. There really isn't
- * much between them in performance though, especially as locks are out of line.
*/
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{