From 420ae9518404c4aeda3abc8e017c8fdcc3a13d6b Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 16 Jun 2014 07:25:06 +0400 Subject: xtensa: simplify addition of new core variants Instead of adding new Kconfig options and Makefile rules for each new core variant provide XTENSA_VARIANT_CUSTOM variant and record variant name in the XTENSA_VARIANT_NAME variable. Adding new core variant now means providing directory structure under arch/xtensa/variant and specifying correct name in kernel configuration. Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 36 +++++++++++++++++++++++++++++++++++- arch/xtensa/Makefile | 7 ++----- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 3a617af60d46..7feca8dfdff2 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -62,7 +62,9 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y config MMU - def_bool n + bool + default n if !XTENSA_VARIANT_CUSTOM + default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM config VARIANT_IRQ_SWITCH def_bool n @@ -102,8 +104,40 @@ config XTENSA_VARIANT_S6000 select VARIANT_IRQ_SWITCH select ARCH_REQUIRE_GPIOLIB select XTENSA_CALIBRATE_CCOUNT + +config XTENSA_VARIANT_CUSTOM + bool "Custom Xtensa processor configuration" + select MAY_HAVE_SMP + select HAVE_XTENSA_GPIO32 + help + Select this variant to use a custom Xtensa processor configuration. + You will be prompted for a processor variant CORENAME. endchoice +config XTENSA_VARIANT_CUSTOM_NAME + string "Xtensa Processor Custom Core Variant Name" + depends on XTENSA_VARIANT_CUSTOM + help + Provide the name of a custom Xtensa processor variant. + This CORENAME selects arch/xtensa/variant/CORENAME. + Dont forget you have to select MMU if you have one. + +config XTENSA_VARIANT_NAME + string + default "dc232b" if XTENSA_VARIANT_DC232B + default "dc233c" if XTENSA_VARIANT_DC233C + default "fsf" if XTENSA_VARIANT_FSF + default "s6000" if XTENSA_VARIANT_S6000 + default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM + +config XTENSA_VARIANT_MMU + bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" + depends on XTENSA_VARIANT_CUSTOM + default y + help + Build a Conventional Kernel with full MMU support, + ie: it supports a TLB with auto-loading, page protection. + config XTENSA_UNALIGNED_USER bool "Unaligned memory access in use space" help diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 81250ece3062..472533064b46 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -4,6 +4,7 @@ # for more details. # # Copyright (C) 2001 - 2005 Tensilica Inc. +# Copyright (C) 2014 Cadence Design Systems Inc. # # This file is included by the global makefile so that you can add your own # architecture-specific flags and dependencies. Remember to do have actions @@ -13,11 +14,7 @@ # Core configuration. # (Use VAR= to use another default compiler.) -variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf -variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b -variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c -variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000 -variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom +variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME)) VARIANT = $(variant-y) export VARIANT -- cgit v1.2.3 From 8a9de05954846fe854cdea47c0178bb67fc70a47 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 16 Jun 2014 08:15:43 +0400 Subject: xtensa: make MMU-related configuration options depend on MMU MMUv3 and HIGHMEM support are available only on configurations with MMU, don't show them otherwise. 
Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 7feca8dfdff2..480a7d5fb105 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -197,6 +197,7 @@ config MATH_EMULATION config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX bool "Initialize Xtensa MMU inside the Linux kernel code" + depends on MMU default y help Earlier version initialized the MMU in the exception vector @@ -226,6 +227,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX config HIGHMEM bool "High Memory Support" + depends on MMU help Linux can use the full amount of RAM in the system by default. However, the default MMUv2 setup only maps the -- cgit v1.2.3 From 920f8a396595281037b48dff8bad6003ba6c7733 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 16 Jun 2014 08:20:17 +0400 Subject: xtensa: sort 'select' statements in Kconfig Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 480a7d5fb105..3b486dd5d6d4 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -4,24 +4,24 @@ config ZONE_DMA config XTENSA def_bool y select ARCH_WANT_FRAME_POINTERS - select HAVE_IDE - select GENERIC_ATOMIC64 - select GENERIC_CLOCKEVENTS - select VIRT_TO_BUS - select GENERIC_IRQ_SHOW - select GENERIC_SCHED_CLOCK - select MODULES_USE_ELF_RELA - select GENERIC_PCI_IOMAP select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_OPTIONAL_GPIOLIB select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS - select IRQ_DOMAIN - select HAVE_OPROFILE + select COMMON_CLK + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK select HAVE_FUNCTION_TRACER + select HAVE_IDE select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_OPROFILE select HAVE_PERF_EVENTS - select COMMON_CLK + select IRQ_DOMAIN + select MODULES_USE_ELF_RELA + select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica primarily for embedded systems. These processors are both -- cgit v1.2.3 From 4964527da82d6faacc753e8a322b1b8ef8340bd1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 16 Jun 2014 08:25:43 +0400 Subject: xtensa: select HAVE_IDE only on platforms that may have it HAVE_IDE is not a property of architecture but of a platform, and neither ISS or XTFPGA support it. Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 3 ++- arch/xtensa/configs/iss_defconfig | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 3b486dd5d6d4..eaa9d07907d7 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -15,7 +15,6 @@ config XTENSA select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select HAVE_FUNCTION_TRACER - select HAVE_IDE select HAVE_IRQ_TIME_ACCOUNTING select HAVE_OPROFILE select HAVE_PERF_EVENTS @@ -286,12 +285,14 @@ config XTENSA_PLATFORM_ISS config XTENSA_PLATFORM_XT2000 bool "XT2000" + select HAVE_IDE help XT2000 is the name of Tensilica's feature-rich emulation platform. This hardware is capable of running a full Linux distribution. 
config XTENSA_PLATFORM_S6105 bool "S6105" + select HAVE_IDE select SERIAL_CONSOLE select NO_IOPORT_MAP diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig index 1493c68352d1..009fe4a266dc 100644 --- a/arch/xtensa/configs/iss_defconfig +++ b/arch/xtensa/configs/iss_defconfig @@ -308,7 +308,7 @@ CONFIG_MISC_DEVICES=y # EEPROM support # # CONFIG_EEPROM_93CX6 is not set -CONFIG_HAVE_IDE=y +# CONFIG_HAVE_IDE is not set # CONFIG_IDE is not set # -- cgit v1.2.3 From ad4a96b418aceed3f86882e7619bdccaf04c9ce0 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 17 Jun 2014 01:32:52 +0400 Subject: xtensa: remove orphan MATH_EMULATION symbol Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 5 ----- arch/xtensa/configs/common_defconfig | 1 - arch/xtensa/configs/iss_defconfig | 1 - arch/xtensa/configs/s6105_defconfig | 1 - 4 files changed, 8 deletions(-) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index eaa9d07907d7..3d83c29c9232 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -189,11 +189,6 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. -config MATH_EMULATION - bool "Math emulation" - help - Can we use information of configuration file? - config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX bool "Initialize Xtensa MMU inside the Linux kernel code" depends on MMU diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig index f6000fe05119..721df1214bc3 100644 --- a/arch/xtensa/configs/common_defconfig +++ b/arch/xtensa/configs/common_defconfig @@ -66,7 +66,6 @@ CONFIG_XTENSA_ARCH_LINUX_BE=y CONFIG_MMU=y # CONFIG_XTENSA_UNALIGNED_USER is not set # CONFIG_PREEMPT is not set -# CONFIG_MATH_EMULATION is not set # CONFIG_HIGHMEM is not set # diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig index 009fe4a266dc..b966baf82cae 100644 --- a/arch/xtensa/configs/iss_defconfig +++ b/arch/xtensa/configs/iss_defconfig @@ -146,7 +146,6 @@ CONFIG_XTENSA_VARIANT_FSF=y # CONFIG_XTENSA_VARIANT_S6000 is not set # CONFIG_XTENSA_UNALIGNED_USER is not set # CONFIG_PREEMPT is not set -# CONFIG_MATH_EMULATION is not set CONFIG_XTENSA_CALIBRATE_CCOUNT=y CONFIG_SERIAL_CONSOLE=y CONFIG_XTENSA_ISS_NETWORK=y diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig index 12a492ab6d17..9471265b8ca6 100644 --- a/arch/xtensa/configs/s6105_defconfig +++ b/arch/xtensa/configs/s6105_defconfig @@ -109,7 +109,6 @@ CONFIG_VARIANT_IRQ_SWITCH=y CONFIG_XTENSA_VARIANT_S6000=y # CONFIG_XTENSA_UNALIGNED_USER is not set CONFIG_PREEMPT=y -# CONFIG_MATH_EMULATION is not set # CONFIG_HIGHMEM is not set CONFIG_XTENSA_CALIBRATE_CCOUNT=y CONFIG_SERIAL_CONSOLE=y -- cgit v1.2.3 From f61bf8e7d19e0a3456a7a9ed97c399e4353698dc Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 20 Jul 2014 03:38:53 +0400 Subject: xtensa: replace IOCTL code definitions with constants This fixes userspace code that builds on other architectures but fails on xtensa due to references to structures that other architectures don't refer to. E.g. 
this fixes the following issue with python-2.7.8: python-2.7.8/Modules/termios.c:861:25: error: invalid application of 'sizeof' to incomplete type 'struct serial_multiport_struct' {"TIOCSERGETMULTI", TIOCSERGETMULTI}, python-2.7.8/Modules/termios.c:870:25: error: invalid application of 'sizeof' to incomplete type 'struct serial_multiport_struct' {"TIOCSERSETMULTI", TIOCSERSETMULTI}, python-2.7.8/Modules/termios.c:900:24: error: invalid application of 'sizeof' to incomplete type 'struct tty_struct' {"TIOCTTYGSTRUCT", TIOCTTYGSTRUCT}, Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/include/uapi/asm/ioctls.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h index b4cb1100c0fb..a47909f0c34b 100644 --- a/arch/xtensa/include/uapi/asm/ioctls.h +++ b/arch/xtensa/include/uapi/asm/ioctls.h @@ -28,17 +28,17 @@ #define TCSETSW 0x5403 #define TCSETSF 0x5404 -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ +#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ @@ -88,7 +88,6 @@ #define TIOCSETD _IOW('T', 35, int) #define TIOCGETD _IOR('T', 36, int) #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ -#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ @@ -114,8 +113,10 @@ #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ -#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ +#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ + /* _IOR('T', 90, struct serial_multiport_struct) */ +#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ + /* _IOW('T', 91, struct serial_multiport_struct) */ #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -- cgit v1.2.3 From 1ca49463c44c970b1ab1d71b0f268bfdf8427a7e Mon Sep 17 00:00:00 2001 From: Alan Douglas Date: Wed, 23 Jul 2014 14:06:40 +0400 Subject: xtensa: fix address checks in dma_{alloc,free}_coherent Virtual address is translated to the XCHAL_KSEG_CACHED region in the dma_free_coherent, but is checked to be in the 0...XCHAL_KSEG_SIZE range. 
Change check for end of the range from 'addr >= X' to 'addr > X - 1' to handle the case of X == 0. Replace 'if (C) BUG();' construct with 'BUG_ON(C);'. Cc: stable@vger.kernel.org Signed-off-by: Alan Douglas Signed-off-by: Max Filippov --- arch/xtensa/kernel/pci-dma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 2d9cc6dbfd78..e8b76b8e4b29 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) /* We currently don't support coherent memory outside KSEG */ - if (ret < XCHAL_KSEG_CACHED_VADDR - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); if (ret != 0) { @@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; + unsigned long addr = (unsigned long)vaddr + + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); free_pages(addr, get_order(size)); } -- cgit v1.2.3 From 89f77c6f5bb4b0058f40f510809ec07255e02a7e Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Fri, 11 Apr 2014 12:25:50 +0200 Subject: xtensa: add renameat2 syscall Signed-off-by: Miklos Szeredi Cc: Chris Zankel Signed-off-by: Max Filippov --- arch/xtensa/include/uapi/asm/unistd.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index b9395529f02d..8883fc877c5c 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -739,7 +739,10 @@ __SYSCALL(334, sys_sched_setattr, 2) #define __NR_sched_getattr 335 __SYSCALL(335, sys_sched_getattr, 3) -#define __NR_syscall_count 336 +#define __NR_renameat2 336 +__SYSCALL(336, sys_renameat2, 5) + +#define __NR_syscall_count 337 /* * sysxtensa syscall handler -- cgit v1.2.3 From 52247123749cc3cbc30168b33ad8c69515c96d23 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 27 Jul 2014 07:23:41 +0400 Subject: xtensa: fix access to THREAD_RA/THREAD_SP/THREAD_DS With SMP and a lot of debug options enabled task_struct::thread gets out of reach of s32i/l32i instructions with base pointing at task_struct, breaking build with the following messages: arch/xtensa/kernel/entry.S: Assembler messages: arch/xtensa/kernel/entry.S:1002: Error: operand 3 of 'l32i.n' has invalid value '1048' arch/xtensa/kernel/entry.S:1831: Error: operand 3 of 's32i.n' has invalid value '1040' arch/xtensa/kernel/entry.S:1832: Error: operand 3 of 's32i.n' has invalid value '1044' Change base to point to task_struct::thread in such cases. Don't use a10 in _switch_to to save/restore prev pointer as a2 is not clobbered. 
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/include/asm/uaccess.h | 5 +++++ arch/xtensa/kernel/entry.S | 12 ++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index fd686dc45d1a..c7211e7e182d 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -52,7 +52,12 @@ */ .macro get_fs ad, sp GET_CURRENT(\ad,\sp) +#if THREAD_CURRENT_DS > 1020 + addi \ad, \ad, TASK_THREAD + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD +#else l32i \ad, \ad, THREAD_CURRENT_DS +#endif .endm /* diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index ef7f4990722b..db96acb1362b 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1820,7 +1820,6 @@ ENTRY(_switch_to) entry a1, 16 - mov a10, a2 # preserve 'prev' (a2) mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO @@ -1828,8 +1827,14 @@ ENTRY(_switch_to) save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER - s32i a0, a10, THREAD_RA # save return address - s32i a1, a10, THREAD_SP # save stack pointer +#if THREAD_RA > 1020 || THREAD_SP > 1020 + addi a10, a2, TASK_THREAD + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer +#else + s32i a0, a2, THREAD_RA # save return address + s32i a1, a2, THREAD_SP # save stack pointer +#endif /* Disable ints while we manipulate the stack pointer. */ @@ -1870,7 +1875,6 @@ ENTRY(_switch_to) load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps - mov a2, a10 # return 'prev' rsync retw -- cgit v1.2.3 From 22def7681186f65f4f1256ae9b0b6db2a7720cb1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Jul 2014 02:27:50 +0400 Subject: xtensa: make fixmap region addressing grow with index It's much easier to reason about alignment and coloring of regions located in the fixmap when fixmap index is just a PFN within the fixmap region. Change fixmap addressing so that index 0 corresponds to FIXADDR_START instead of the FIXADDR_TOP. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/fixmap.h | 27 ++++++++++++++++++++++++--- arch/xtensa/mm/highmem.c | 6 +++--- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 9f6c33d0428a..a43cd5265556 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -23,8 +23,8 @@ * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses - * from the end of the consistent memory region backwards. + * in the boot process. We allocate these special addresses + * from the start of the consistent memory region upwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. @@ -47,7 +47,28 @@ enum fixed_addresses { #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) -#include +#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-deference + * kernel oops. 
Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif #define kmap_get_fixmap_pte(vaddr) \ pte_offset_kernel( \ diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c index 17a8c0d6fd17..2e95a7665bf3 100644 --- a/arch/xtensa/mm/highmem.c +++ b/arch/xtensa/mm/highmem.c @@ -28,9 +28,9 @@ void *kmap_atomic(struct page *page) idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(kmap_pte - idx))); + BUG_ON(!pte_none(*(kmap_pte + idx))); #endif - set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); + set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC)); return (void *)vaddr; } @@ -51,7 +51,7 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ - pte_clear(&init_mm, kvaddr, kmap_pte - idx); + pte_clear(&init_mm, kvaddr, kmap_pte + idx); local_flush_tlb_kernel_range((unsigned long)kvaddr, (unsigned long)kvaddr + PAGE_SIZE); -- cgit v1.2.3 From dec7305d9f752f6ad2ec30ec8a723182437c5aa5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Jul 2014 02:49:15 +0400 Subject: xtensa: allow fixmap and kmap span more than one page table To support aliasing cache both kmap region sizes are multiplied by the number of data cache colors. After that expansion page tables that cover kmap regions may become larger than one page. Correctly allocate and initialize page tables in this case. 
Signed-off-by: Max Filippov --- arch/xtensa/mm/mmu.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 3429b483d9f8..abe4513eb0dd 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -18,32 +18,38 @@ #include #if defined(CONFIG_HIGHMEM) -static void * __init init_pmd(unsigned long vaddr) +static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) { pgd_t *pgd = pgd_offset_k(vaddr); pmd_t *pmd = pmd_offset(pgd, vaddr); + pte_t *pte; + unsigned long i; - if (pmd_none(*pmd)) { - unsigned i; - pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); + n_pages = ALIGN(n_pages, PTRS_PER_PTE); - for (i = 0; i < 1024; i++) - pte_clear(NULL, 0, pte + i); + pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", + __func__, vaddr, n_pages); - set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); - BUG_ON(pte != pte_offset_kernel(pmd, 0)); - pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", - __func__, vaddr, pmd, pte); - return pte; - } else { - return pte_offset_kernel(pmd, 0); + pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t)); + + for (i = 0; i < n_pages; ++i) + pte_clear(NULL, 0, pte + i); + + for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { + pte_t *cur_pte = pte + i; + + BUG_ON(!pmd_none(*pmd)); + set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); + BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); + pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", + __func__, pmd, cur_pte); } + return pte; } static void __init fixedrange_init(void) { - BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); - init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); + init_pmd(__fix_to_virt(0), __end_of_fixed_addresses); } #endif @@ -52,7 +58,7 @@ void __init paging_init(void) memset(swapper_pg_dir, 0, PAGE_SIZE); #ifdef CONFIG_HIGHMEM fixedrange_init(); - pkmap_page_table = init_pmd(PKMAP_BASE); + pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); kmap_init(); #endif } -- cgit v1.2.3 From 7128039fe2dd3d59da9e4ffa036f3aaa3ba87b9f Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 21 Jul 2014 22:01:51 +0400 Subject: xtensa: fix TLBTEMP_BASE_2 region handling in fast_second_level_miss Current definition of TLBTEMP_BASE_2 is always 32K above the TLBTEMP_BASE_1, whereas fast_second_level_miss handler for the TLBTEMP region analyzes virtual address bit (PAGE_SHIFT + DCACHE_ALIAS_ORDER) to determine TLBTEMP region where the fault happened. The size of the TLBTEMP region is also checked incorrectly: not 64K, but twice data cache way size (whicht may as well be less than the instruction cache way size). Fix TLBTEMP_BASE_2 to be TLBTEMP_BASE_1 + data cache way size. Provide TLBTEMP_SIZE that is a greater of doubled data cache way size or the instruction cache way size, and use it to determine if the second level TLB miss occured in the TLBTEMP region. Practical occurence of page faults in the TLBTEMP area is extremely rare, this code can be tested by deletion of all w[di]tlb instructions in the tlbtemp_mapping region. 
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/include/asm/pgtable.h | 7 ++++++- arch/xtensa/kernel/entry.S | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 4b0ca35a93b1..b2173e5da601 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -67,7 +67,12 @@ #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 -#define TLBTEMP_BASE_2 0xC7FF8000 +#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) +#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE +#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) +#else +#define TLBTEMP_SIZE ICACHE_WAY_SIZE +#endif /* * For the Xtensa architecture, the PTE layout is as follows: diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index db96acb1362b..21917e5fd53a 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1565,7 +1565,7 @@ ENTRY(fast_second_level_miss) rsr a0, excvaddr bltu a0, a3, 2f - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) + addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, 2f /* Check if we have to restore an ITLB mapping. */ -- cgit v1.2.3 From a91902db2990909ea5e6b110811b448f2e8f1571 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 21 Jul 2014 18:54:11 +0400 Subject: xtensa: implement clear_user_highpage and copy_user_highpage Existing clear_user_page and copy_user_page cannot be used with highmem because they calculate physical page address from its virtual address and do it incorrectly in case of high memory page mapped with kmap_atomic. Also kmap is not needed, as most likely userspace mapping color would be different from the kmapped color. Provide clear_user_highpage and copy_user_highpage functions that determine if temporary mapping is needed for the pages. Move most of the logic of the former clear_user_page and copy_user_page to xtensa/mm/cache.c only leaving temporary mapping setup, invalidation and clearing/copying in the xtensa/mm/misc.S. Rename these functions to clear_page_alias and copy_page_alias. 
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/cacheflush.h | 2 + arch/xtensa/include/asm/page.h | 12 +++- arch/xtensa/mm/cache.c | 63 +++++++++++++++++++ arch/xtensa/mm/misc.S | 116 ++++++++++++++++------------------- 4 files changed, 127 insertions(+), 66 deletions(-) diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 555a98a18453..e72aaca7a77f 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -37,6 +37,7 @@ * specials for cache aliasing: * * __flush_invalidate_dcache_page_alias(vaddr,paddr) + * __invalidate_dcache_page_alias(vaddr,paddr) * __invalidate_icache_page_alias(vaddr,paddr) */ @@ -62,6 +63,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); +extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); #else static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, unsigned long phys) { } diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 47f582333f6b..11721ccd7f23 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -134,6 +134,7 @@ static inline __attribute_const__ int get_order(unsigned long size) #endif struct page; +struct vm_area_struct; extern void clear_page(void *page); extern void copy_page(void *to, void *from); @@ -143,8 +144,15 @@ extern void copy_page(void *to, void *from); */ #if DCACHE_WAY_SIZE > PAGE_SIZE -extern void clear_user_page(void*, unsigned long, struct page*); -extern void copy_user_page(void*, void*, unsigned long, struct page*); +extern void clear_page_alias(void *vaddr, unsigned long paddr); +extern void copy_page_alias(void *to, void *from, + unsigned long to_paddr, unsigned long from_paddr); + +#define clear_user_highpage clear_user_highpage +void clear_user_highpage(struct page *page, unsigned long vaddr); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); #else # define clear_user_page(page, vaddr, pg) clear_page(page) # define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 63cbb867dadd..96aea6624318 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -63,6 +63,69 @@ #error "HIGHMEM is not supported on cores with aliasing cache." 
#endif +#if (DCACHE_WAY_SIZE > PAGE_SIZE) +static inline void kmap_invalidate_coherent(struct page *page, + unsigned long vaddr) +{ + if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { + unsigned long kvaddr; + + if (!PageHighMem(page)) { + kvaddr = (unsigned long)page_to_virt(page); + + __invalidate_dcache_page(kvaddr); + } else { + kvaddr = TLBTEMP_BASE_1 + + (page_to_phys(page) & DCACHE_ALIAS_MASK); + + __invalidate_dcache_page_alias(kvaddr, + page_to_phys(page)); + } + } +} + +static inline void *coherent_kvaddr(struct page *page, unsigned long base, + unsigned long vaddr, unsigned long *paddr) +{ + if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { + *paddr = page_to_phys(page); + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); + } else { + *paddr = 0; + return page_to_virt(page); + } +} + +void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + unsigned long paddr; + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); + + pagefault_disable(); + kmap_invalidate_coherent(page, vaddr); + set_bit(PG_arch_1, &page->flags); + clear_page_alias(kvaddr, paddr); + pagefault_enable(); +} + +void copy_user_highpage(struct page *dst, struct page *src, + unsigned long vaddr, struct vm_area_struct *vma) +{ + unsigned long dst_paddr, src_paddr; + void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, + &dst_paddr); + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, + &src_paddr); + + pagefault_disable(); + kmap_invalidate_coherent(dst, vaddr); + set_bit(PG_arch_1, &dst->flags); + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); + pagefault_enable(); +} + +#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ + #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK /* diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index 1f68558dbcc2..11a01c3e9cea 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S @@ -110,41 +110,24 @@ ENTRY(__tlbtemp_mapping_start) #if (DCACHE_WAY_SIZE > PAGE_SIZE) /* - * clear_user_page (void *addr, unsigned long vaddr, struct page *page) - * a2 a3 a4 + * clear_page_alias(void *addr, unsigned long paddr) + * a2 a3 */ -ENTRY(clear_user_page) +ENTRY(clear_page_alias) entry a1, 32 - /* Mark page dirty and determine alias. */ + /* Skip setting up a temporary DTLB if not aliased low page. */ - movi a7, (1 << PG_ARCH_1) - l32i a5, a4, PAGE_FLAGS - xor a6, a2, a3 - extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER - or a5, a5, a7 - slli a3, a3, PAGE_SHIFT - s32i a5, a4, PAGE_FLAGS + movi a5, PAGE_OFFSET + movi a6, 0 + beqz a3, 1f - /* Skip setting up a temporary DTLB if not aliased. */ - - beqz a6, 1f - - /* Invalidate kernel page. */ - - mov a10, a2 - call8 __invalidate_dcache_page - - /* Setup a temporary DTLB with the color of the VPN */ - - movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff - movi a5, TLBTEMP_BASE_1 # virt - add a6, a2, a4 # ppn - add a2, a5, a3 # add 'color' + /* Setup a temporary DTLB for the addr. */ + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) + mov a4, a2 wdtlb a6, a2 dsync @@ -165,62 +148,43 @@ ENTRY(clear_user_page) /* We need to invalidate the temporary idtlb entry, if any. 
*/ -1: addi a2, a2, -PAGE_SIZE - idtlb a2 +1: idtlb a4 dsync retw -ENDPROC(clear_user_page) +ENDPROC(clear_page_alias) /* - * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) - * a2 a3 a4 a5 + * copy_page_alias(void *to, void *from, + * a2 a3 + * unsigned long to_paddr, unsigned long from_paddr) + * a4 a5 */ -ENTRY(copy_user_page) +ENTRY(copy_page_alias) entry a1, 32 - /* Mark page dirty and determine alias for destination. */ - - movi a8, (1 << PG_ARCH_1) - l32i a9, a5, PAGE_FLAGS - xor a6, a2, a4 - xor a7, a3, a4 - extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER - or a9, a9, a8 - slli a4, a4, PAGE_SHIFT - s32i a9, a5, PAGE_FLAGS - movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff - - beqz a6, 1f - - /* Invalidate dcache */ - - mov a10, a2 - call8 __invalidate_dcache_page + /* Skip setting up a temporary DTLB for destination if not aliased. */ - /* Setup a temporary DTLB with a matching color. */ + movi a6, 0 + movi a7, 0 + beqz a4, 1f - movi a8, TLBTEMP_BASE_1 # base - add a6, a2, a5 # ppn - add a2, a8, a4 # add 'color' + /* Setup a temporary DTLB for destination. */ + addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) wdtlb a6, a2 dsync - /* Skip setting up a temporary DTLB for destination if not aliased. */ + /* Skip setting up a temporary DTLB for source if not aliased. */ -1: beqz a7, 1f +1: beqz a5, 1f - /* Setup a temporary DTLB with a matching color. */ + /* Setup a temporary DTLB for source. */ - movi a8, TLBTEMP_BASE_2 # base - add a7, a3, a5 # ppn - add a3, a8, a4 + addi a7, a5, PAGE_KERNEL addi a8, a3, 1 # way1 wdtlb a7, a8 @@ -271,7 +235,7 @@ ENTRY(copy_user_page) retw -ENDPROC(copy_user_page) +ENDPROC(copy_page_alias) #endif @@ -300,6 +264,30 @@ ENTRY(__flush_invalidate_dcache_page_alias) retw ENDPROC(__flush_invalidate_dcache_page_alias) + +/* + * void __invalidate_dcache_page_alias (addr, phys) + * a2 a3 + */ + +ENTRY(__invalidate_dcache_page_alias) + + entry sp, 16 + + movi a7, 0 # required for exception handler + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) + mov a4, a2 + wdtlb a6, a2 + dsync + + ___invalidate_dcache_page a2 a3 + + idtlb a4 + dsync + + retw + +ENDPROC(__invalidate_dcache_page_alias) #endif ENTRY(__tlbtemp_mapping_itlb) -- cgit v1.2.3 From 32544d9c10c42bac3be8b87d2fc95b0aef008795 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Jul 2014 02:51:49 +0400 Subject: xtensa: support aliasing cache in k[un]map_atomic Map high memory pages at virtual addresses with color that match color of their physical address. Existing cache alias management mechanisms may be used with such pages. 
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/fixmap.h | 3 ++- arch/xtensa/include/asm/page.h | 2 ++ arch/xtensa/mm/highmem.c | 17 ++++++++++------- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index a43cd5265556..62b507deea9d 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -38,7 +38,8 @@ enum fixed_addresses { #ifdef CONFIG_HIGHMEM /* reserved pte's for temporary kernel mappings */ FIX_KMAP_BEGIN, - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, + FIX_KMAP_END = FIX_KMAP_BEGIN + + (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1, #endif __end_of_fixed_addresses }; diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 11721ccd7f23..abe24c6f8b2f 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -78,7 +78,9 @@ # define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0) #else # define DCACHE_ALIAS_ORDER 0 +# define DCACHE_ALIAS(a) ((void)(a), 0) #endif +#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER) #if ICACHE_WAY_SIZE > PAGE_SIZE # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT) diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c index 2e95a7665bf3..466abaed5382 100644 --- a/arch/xtensa/mm/highmem.c +++ b/arch/xtensa/mm/highmem.c @@ -14,18 +14,23 @@ static pte_t *kmap_pte; +static inline enum fixed_addresses kmap_idx(int type, unsigned long color) +{ + return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + + color; +} + void *kmap_atomic(struct page *page) { enum fixed_addresses idx; unsigned long vaddr; - int type; pagefault_disable(); if (!PageHighMem(page)) return page_address(page); - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); + idx = kmap_idx(kmap_atomic_idx_push(), + DCACHE_ALIAS(page_to_phys(page))); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(*(kmap_pte + idx))); @@ -38,12 +43,10 @@ EXPORT_SYMBOL(kmap_atomic); void __kunmap_atomic(void *kvaddr) { - int idx, type; - if (kvaddr >= (void *)FIXADDR_START && kvaddr < (void *)FIXADDR_TOP) { - type = kmap_atomic_idx(); - idx = type + KM_TYPE_NR * smp_processor_id(); + int idx = kmap_idx(kmap_atomic_idx(), + DCACHE_ALIAS((unsigned long)kvaddr)); /* * Force other mappings to Oops if they'll try to access this -- cgit v1.2.3 From 8504b503dfa86f698a38f9ee1fc2876ab012b776 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 17 Jul 2014 05:04:49 +0400 Subject: xtensa: support aliasing cache in kmap Define ARCH_PKMAP_COLORING and provide corresponding macro definitions on cores with aliasing data cache. Instead of single last_pkmap_nr maintain an array last_pkmap_nr_arr of pkmap counters for each page color. Make sure that kmap maps physical page at virtual address with color matching its physical address. 
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/highmem.h | 40 +++++++++++++++++++++++++++++++++++++-- arch/xtensa/mm/highmem.c | 18 ++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 2653ef5d55f1..2c7901edffaf 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -12,19 +12,55 @@ #ifndef _XTENSA_HIGHMEM_H #define _XTENSA_HIGHMEM_H +#include #include #include #include #include -#define PKMAP_BASE (FIXADDR_START - PMD_SIZE) -#define LAST_PKMAP PTRS_PER_PTE +#define PKMAP_BASE ((FIXADDR_START - \ + (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK) +#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS) #define LAST_PKMAP_MASK (LAST_PKMAP - 1) #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) #define kmap_prot PAGE_KERNEL +#if DCACHE_WAY_SIZE > PAGE_SIZE +#define get_pkmap_color get_pkmap_color +static inline int get_pkmap_color(struct page *page) +{ + return DCACHE_ALIAS(page_to_phys(page)); +} + +extern unsigned int last_pkmap_nr_arr[]; + +static inline unsigned int get_next_pkmap_nr(unsigned int color) +{ + last_pkmap_nr_arr[color] = + (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK; + return last_pkmap_nr_arr[color] + color; +} + +static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) +{ + return pkmap_nr < DCACHE_N_COLORS; +} + +static inline int get_pkmap_entries_count(unsigned int color) +{ + return LAST_PKMAP / DCACHE_N_COLORS; +} + +extern wait_queue_head_t pkmap_map_wait_arr[]; + +static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) +{ + return pkmap_map_wait_arr + color; +} +#endif + extern pte_t *pkmap_page_table; void *kmap_high(struct page *page); diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c index 466abaed5382..8cfb71ec0937 100644 --- a/arch/xtensa/mm/highmem.c +++ b/arch/xtensa/mm/highmem.c @@ -14,6 +14,23 @@ static pte_t *kmap_pte; +#if DCACHE_WAY_SIZE > PAGE_SIZE +unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS]; +wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS]; + +static void __init kmap_waitqueues_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i) + init_waitqueue_head(pkmap_map_wait_arr + i); +} +#else +static inline void kmap_waitqueues_init(void) +{ +} +#endif + static inline enum fixed_addresses kmap_idx(int type, unsigned long color) { return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + @@ -72,4 +89,5 @@ void __init kmap_init(void) /* cache the first kmap pte */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); + kmap_waitqueues_init(); } -- cgit v1.2.3 From 270eec76de2557c9df01d74bc4c948d0924fc007 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 21 Jul 2014 04:24:40 +0400 Subject: xtensa: support highmem in aliasing cache flushing code Use __flush_invalidate_dcache_page_alias with alias set to color of the page physical address instead of __flush_invalidate_dcache_page: this works for high memory pages and mapping/unmapping to the TLBTEMP area is virtually free. Allow building configurations with aliasing cache and highmem enabled. 
Signed-off-by: Max Filippov --- arch/xtensa/mm/cache.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 96aea6624318..d75aa1476da7 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -59,10 +59,6 @@ * */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) -#error "HIGHMEM is not supported on cores with aliasing cache." -#endif - #if (DCACHE_WAY_SIZE > PAGE_SIZE) static inline void kmap_invalidate_coherent(struct page *page, unsigned long vaddr) @@ -166,7 +162,8 @@ void flush_dcache_page(struct page *page) if (!alias && !mapping) return; - __flush_invalidate_dcache_page((long)page_address(page)); + virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(virt, phys); virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); @@ -231,13 +228,12 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { - - unsigned long paddr = (unsigned long) page_address(page); unsigned long phys = page_to_phys(page); - unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); - - __flush_invalidate_dcache_page(paddr); + unsigned long tmp; + tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(tmp, phys); + tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); __invalidate_icache_page_alias(tmp, phys); -- cgit v1.2.3 From b82837c772c3a1d8778295ab094cf46ecddc8057 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 24 Mar 2014 05:16:02 +0400 Subject: xtensa: configure kc705 for highmem Enable all memory available on KC705 (1G - 128M) by default. Update memory node in DTS and also limit usable memory in bootargs in case memmap is passed from the bootloader. Signed-off-by: Max Filippov --- arch/xtensa/boot/dts/kc705.dts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts index 742a347be67a..c4d17a34ab86 100644 --- a/arch/xtensa/boot/dts/kc705.dts +++ b/arch/xtensa/boot/dts/kc705.dts @@ -4,8 +4,11 @@ / { compatible = "cdns,xtensa-kc705"; + chosen { + bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000"; + }; memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000>; + reg = <0x00000000 0x38000000>; }; }; -- cgit v1.2.3 From a450dc69dc57e2bd9de5a970f5015502e6950c73 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 1 Aug 2014 19:07:10 +0400 Subject: xtensa: fix kernel/user jump out of fast_unaligned Use correct register (a0, just read from the PS) to check user mode bit. 
Signed-off-by: Max Filippov --- arch/xtensa/kernel/align.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index d4cef6039a5c..25a65938dda8 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -441,7 +441,7 @@ ENTRY(fast_unaligned) mov a1, a2 rsr a0, ps - bbsi.l a2, PS_UM_BIT, 1f # jump if user mode + bbsi.l a0, PS_UM_BIT, 1f # jump if user mode movi a0, _kernel_exception jx a0 -- cgit v1.2.3 From c3ef1f4d379cbc79daf80ffb8d43c611da090b82 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 22 Jan 2014 09:16:37 +0400 Subject: xtensa: add double exception fixup handler for fast_unaligned fast_unaligned_fixup restores user registers and runs normal exception handler in the current stack frame. Unaligned load/store is retried after that. Signed-off-by: Max Filippov --- arch/xtensa/kernel/align.S | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 25a65938dda8..904f32f05c09 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -8,6 +8,7 @@ * this archive for more details. * * Copyright (C) 2001 - 2005 Tensilica, Inc. + * Copyright (C) 2014 Cadence Design Systems Inc. * * Rewritten by Chris Zankel * @@ -174,6 +175,10 @@ ENTRY(fast_unaligned) s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 + rsr a3, excsave1 + movi a4, fast_unaligned_fixup + s32i a4, a3, EXC_TABLE_FIXUP + /* Keep value of SAR in a0 */ rsr a0, sar @@ -430,6 +435,10 @@ ENTRY(fast_unaligned) .Linvalid_instruction_store: .Linvalid_instruction: + movi a4, 0 + rsr a3, excsave1 + s32i a4, a3, EXC_TABLE_FIXUP + /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ l32i a8, a2, PT_AREG8 @@ -451,4 +460,38 @@ ENTRY(fast_unaligned) ENDPROC(fast_unaligned) +ENTRY(fast_unaligned_fixup) + + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + wsr a3, excsave1 + + l32i a8, a2, PT_AREG8 + l32i a7, a2, PT_AREG7 + l32i a6, a2, PT_AREG6 + l32i a5, a2, PT_AREG5 + l32i a4, a2, PT_AREG4 + l32i a0, a2, PT_AREG2 + xsr a0, depc # restore depc and a0 + wsr a0, sar + + rsr a0, exccause + s32i a0, a2, PT_DEPC # mark as a regular exception + + rsr a0, ps + bbsi.l a0, PS_UM_BIT, 1f # jump if user mode + + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler + l32i a3, a2, PT_AREG3 + jx a0 +1: + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_USER # load handler + l32i a3, a2, PT_AREG3 + jx a0 + +ENDPROC(fast_unaligned_fixup) + #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ -- cgit v1.2.3 From e9500dd852ca6ede346500010545975bf10244dc Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 3 Aug 2014 00:42:38 +0400 Subject: xtensa: make fast_unaligned store restartable fast_unaligned may encounter DTLB miss or SEGFAULT during the store emulation. Don't update epc1 and lcount until after the store emulation is complete, so that the faulting store instruction could be replayed. Remove duplicate code handling zero overhead loops and calculate new epc1 and lcount in one place. 
Signed-off-by: Max Filippov --- arch/xtensa/kernel/align.S | 51 +++++++++++++++++----------------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 904f32f05c09..2c7c13d8e91a 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -277,18 +277,6 @@ ENTRY(fast_unaligned) /* Set target register. */ 1: - -#if XCHAL_HAVE_LOOPS - rsr a5, lend # check if we reached LEND - bne a7, a5, 1f - rsr a5, lcount # and LCOUNT != 0 - beqz a5, 1f - addi a5, a5, -1 # decrement LCOUNT and set - rsr a7, lbeg # set PC to LBEGIN - wsr a5, lcount -#endif - -1: wsr a7, epc1 # skip load instruction extui a4, a4, INSN_T, 4 # extract target register movi a5, .Lload_table addx8 a4, a4, a5 @@ -358,17 +346,6 @@ ENTRY(fast_unaligned) /* Get memory address */ 1: -#if XCHAL_HAVE_LOOPS - rsr a4, lend # check if we reached LEND - bne a7, a4, 1f - rsr a4, lcount # and LCOUNT != 0 - beqz a4, 1f - addi a4, a4, -1 # decrement LCOUNT and set - rsr a7, lbeg # set PC to LBEGIN - wsr a4, lcount -#endif - -1: wsr a7, epc1 # skip store instruction movi a4, ~3 and a4, a4, a8 # align memory address @@ -380,25 +357,25 @@ ENTRY(fast_unaligned) #endif __ssa8r a8 - __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) + __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) #ifdef UNALIGNED_USER_EXCEPTION l32e a5, a4, -8 #else l32i a5, a4, 0 # load lower address word #endif - and a5, a5, a7 # mask - __sh a7, a3 # shift value - or a5, a5, a7 # or with original value + and a5, a5, a8 # mask + __sh a8, a3 # shift value + or a5, a5, a8 # or with original value #ifdef UNALIGNED_USER_EXCEPTION s32e a5, a4, -8 - l32e a7, a4, -4 + l32e a8, a4, -4 #else s32i a5, a4, 0 # store - l32i a7, a4, 4 # same for upper address word + l32i a8, a4, 4 # same for upper address word #endif __sl a5, a3 - and a6, a7, a6 + and a6, a8, a6 or a6, a6, a5 #ifdef UNALIGNED_USER_EXCEPTION s32e a6, a4, -4 @@ -406,9 +383,19 @@ ENTRY(fast_unaligned) s32i a6, a4, 4 #endif - /* Done. restore stack and return */ - .Lexit: +#if XCHAL_HAVE_LOOPS + rsr a4, lend # check if we reached LEND + bne a7, a4, 1f + rsr a4, lcount # and LCOUNT != 0 + beqz a4, 1f + addi a4, a4, -1 # decrement LCOUNT and set + rsr a7, lbeg # set PC to LBEGIN + wsr a4, lcount +#endif + +1: wsr a7, epc1 # skip emulated instruction + movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP -- cgit v1.2.3 From 21570465a30f13197991eb2637d6ffc6c6880eef Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 4 Aug 2014 15:24:58 +0400 Subject: xtensa: move invalid unaligned instruction handler closer to its users With this change a threaded jump from .Linvalid_instruction_load to .Linvalid_instruction can be removed and more code may be added to common load/store exit path. Signed-off-by: Max Filippov --- arch/xtensa/kernel/align.S | 62 ++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 2c7c13d8e91a..87d80d8c33e5 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -230,10 +230,6 @@ ENTRY(fast_unaligned) addx8 a5, a6, a5 jx a5 # jump into table - /* Invalid instruction, CRITICAL! */ -.Linvalid_instruction_load: - j .Linvalid_instruction - /* Load: Load memory address. 
*/ .Lload: movi a3, ~3 @@ -319,6 +315,35 @@ ENTRY(fast_unaligned) mov a3, a14 ; _j 1f; .align 8 mov a3, a15 ; _j 1f; .align 8 + /* We cannot handle this exception. */ + + .extern _kernel_exception +.Linvalid_instruction_load: +.Linvalid_instruction_store: + + movi a4, 0 + rsr a3, excsave1 + s32i a4, a3, EXC_TABLE_FIXUP + + /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ + + l32i a8, a2, PT_AREG8 + l32i a7, a2, PT_AREG7 + l32i a6, a2, PT_AREG6 + l32i a5, a2, PT_AREG5 + l32i a4, a2, PT_AREG4 + wsr a0, sar + mov a1, a2 + + rsr a0, ps + bbsi.l a0, PS_UM_BIT, 2f # jump if user mode + + movi a0, _kernel_exception + jx a0 + +2: movi a0, _user_exception + jx a0 + 1: # a7: instruction pointer, a4: instruction, a3: value movi a6, 0 # mask: ffffffff:00000000 @@ -416,35 +441,6 @@ ENTRY(fast_unaligned) l32i a2, a2, PT_AREG2 rfe - /* We cannot handle this exception. */ - - .extern _kernel_exception -.Linvalid_instruction_store: -.Linvalid_instruction: - - movi a4, 0 - rsr a3, excsave1 - s32i a4, a3, EXC_TABLE_FIXUP - - /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ - - l32i a8, a2, PT_AREG8 - l32i a7, a2, PT_AREG7 - l32i a6, a2, PT_AREG6 - l32i a5, a2, PT_AREG5 - l32i a4, a2, PT_AREG4 - wsr a0, sar - mov a1, a2 - - rsr a0, ps - bbsi.l a0, PS_UM_BIT, 1f # jump if user mode - - movi a0, _kernel_exception - jx a0 - -1: movi a0, _user_exception - jx a0 - ENDPROC(fast_unaligned) ENTRY(fast_unaligned_fixup) -- cgit v1.2.3 From a83b02e9bd0c28d27b6c6e5b184585f7a1b8bf86 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 4 Aug 2014 05:55:53 +0400 Subject: xtensa: allow single-stepping through unaligned load/store Update icount when icountlevel is non-zero but not greater than EXCM level when load/store instruction is successfully emulated. This allows single-stepping over such instruction in userspace debugger. Signed-off-by: Max Filippov --- arch/xtensa/kernel/align.S | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 87d80d8c33e5..890004af03a9 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -421,6 +421,14 @@ ENTRY(fast_unaligned) 1: wsr a7, epc1 # skip emulated instruction + /* Update icount if we're single-stepping in userspace. */ + rsr a4, icountlevel + beqz a4, 1f + bgeui a4, LOCKLEVEL + 1, 1f + rsr a4, icount + addi a4, a4, 1 + wsr a4, icount +1: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP -- cgit v1.2.3 From d1b6ba82a50cecf94be540a3a153aa89d97511a0 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 31 Jul 2014 22:40:57 +0400 Subject: xtensa: fix a6 and a7 handling in fast_syscall_xtensa Remove restoring a6 on some return paths and instead modify and restore it in a single place, using symbolic name. Correctly restore a7 from PT_AREG7 in case of illegal a6 value. 
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/kernel/entry.S | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 21917e5fd53a..a06b7efaae82 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1001,9 +1001,8 @@ ENTRY(fast_syscall_xtensa) movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp + _bgeui a6, SYS_XTENSA_COUNT, .Lill + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ @@ -1015,27 +1014,26 @@ TRY s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 - addi a6, a6, 1 # restore a6 (really necessary?) rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set - addi a6, a6, 1 # restore a6 (really necessary?) rfe .Lnswp: /* Atomic set, add, and exg_add. */ TRY l32i a7, a3, 0 # orig + addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set + addi a6, a6, SYS_XTENSA_ATOMIC_SET TRY s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 - addi a6, a6, 1 # restore a6 (really necessary?) rfe CATCH @@ -1044,7 +1042,7 @@ CATCH movi a2, -EFAULT rfe -.Lill: l32i a7, a2, PT_AREG0 # restore a7 +.Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe -- cgit v1.2.3 From 3cfc096e4c4fbc234634cf8a30d40348a25fc9ba Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 7 Aug 2014 01:03:01 +0400 Subject: xtensa: don't allow overflow/underflow on unaligned stack Double exceptions that happen during register window overflow/underflow are handled in the topmost stack frame, as if it was the only exception that occured. However unaligned access exception handler is special because it needs to analyze instruction that caused the exception, but the userspace instruction that triggered window exception is completely irrelevant. Unaligned data access is rather normal in the generic userspace code, but stack pointer manipulation must always be done by architecture-aware code and thus unaligned stack means a serious problem anyway. Use the default unaligned access handler that raises SIGBUS in case of unaligned access in window overflow/underflow handler. 
Signed-off-by: Max Filippov --- arch/xtensa/kernel/traps.c | 5 +---- arch/xtensa/kernel/vectors.S | 8 +++++++- arch/xtensa/kernel/vmlinux.lds.S | 4 ++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index eebbfd8c26fc..9d2f45f010ef 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -101,9 +101,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #ifdef CONFIG_XTENSA_UNALIGNED_USER { EXCCAUSE_UNALIGNED, USER, fast_unaligned }, -#else -{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, #endif +{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, #endif #ifdef CONFIG_MMU @@ -264,7 +263,6 @@ do_illegal_instruction(struct pt_regs *regs) */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION -#ifndef CONFIG_XTENSA_UNALIGNED_USER void do_unaligned_user (struct pt_regs *regs) { @@ -286,7 +284,6 @@ do_unaligned_user (struct pt_regs *regs) } #endif -#endif void do_debug(struct pt_regs *regs) diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 8453e6e39895..1b397a902292 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -454,8 +454,14 @@ _DoubleExceptionVector_WindowOverflow: s32i a0, a2, PT_DEPC _DoubleExceptionVector_handle_exception: + addi a0, a0, -EXCCAUSE_UNALIGNED + beqz a0, 2f addx4 a0, a0, a3 - l32i a0, a0, EXC_TABLE_FAST_USER + l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED + xsr a3, excsave1 + jx a0 +2: + movi a0, user_exception xsr a3, excsave1 jx a0 diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index d16db6df86f8..fc1bc2ba8d5d 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -269,13 +269,13 @@ SECTIONS .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 40, + DOUBLEEXC_VECTOR_VADDR - 48, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 40, + 48, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; -- cgit v1.2.3 From 9184289c979e78ce466993b53fc951633441e571 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 7 Aug 2014 03:32:30 +0400 Subject: xtensa: deprecate fast_xtensa and fast_spill_registers syscalls These syscalls are not used by userspace tools for some time now, and they have issues when called with invalid arguments. It's not worth changing signal delivery mechanism as we don't expect any new users for these syscalls. Let's keep them for backwards compatibility under #ifdef, disabled by default. Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 26 ++++++++++++++++++++++++++ arch/xtensa/kernel/entry.S | 28 ++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 3d83c29c9232..49c6c3d94449 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -238,6 +238,32 @@ config HIGHMEM If unsure, say Y. +config FAST_SYSCALL_XTENSA + bool "Enable fast atomic syscalls" + default n + help + fast_syscall_xtensa is a syscall that can make atomic operations + on UP kernel when processor has no s32c1i support. + + This syscall is deprecated. 
It may have issues when called with + invalid arguments. It is provided only for backwards compatibility. + Only enable it if your userspace software requires it. + + If unsure, say N. + +config FAST_SYSCALL_SPILL_REGISTERS + bool "Enable spill registers syscall" + default n + help + fast_syscall_spill_registers is a syscall that spills all active + register windows of a calling userspace task onto its stack. + + This syscall is deprecated. It may have issues when called with + invalid arguments. It is provided only for backwards compatibility. + Only enable it if your userspace software requires it. + + If unsure, say N. + endmenu config XTENSA_CALIBRATE_CCOUNT diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index a06b7efaae82..82bbfa5a05b3 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -986,6 +986,8 @@ ENDPROC(fast_syscall_unrecoverable) * j done */ +#ifdef CONFIG_FAST_SYSCALL_XTENSA + #define TRY \ .section __ex_table, "a"; \ .word 66f, 67f; \ @@ -1049,6 +1051,18 @@ CATCH ENDPROC(fast_syscall_xtensa) +#else /* CONFIG_FAST_SYSCALL_XTENSA */ + +ENTRY(fast_syscall_xtensa) + + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, -ENOSYS + rfe + +ENDPROC(fast_syscall_xtensa) + +#endif /* CONFIG_FAST_SYSCALL_XTENSA */ + /* fast_syscall_spill_registers. * @@ -1064,6 +1078,8 @@ ENDPROC(fast_syscall_xtensa) * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. */ +#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS + ENTRY(fast_syscall_spill_registers) /* Register a FIXUP handler (pass current wb as a parameter) */ @@ -1398,6 +1414,18 @@ ENTRY(fast_syscall_spill_registers_fixup_return) ENDPROC(fast_syscall_spill_registers_fixup_return) +#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ + +ENTRY(fast_syscall_spill_registers) + + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, -ENOSYS + rfe + +ENDPROC(fast_syscall_spill_registers) + +#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ + #ifdef CONFIG_MMU /* * We should never get here. Bail out! -- cgit v1.2.3
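
A minimal sketch (editorial footnote, not part of the patch series above): it re-derives a few of the hard-coded ioctl numbers introduced by "xtensa: replace IOCTL code definitions with constants", assuming the generic asm-generic/ioctl.h layout (dir:2 | size:14 | type:8 | nr:8 bits) and the usual sizes sizeof(struct termio) == 18 and sizeof(struct winsize) == 8; the helper ioc() and the IOC_* constants are local names defined here only for illustration.

#include <stdio.h>

#define IOC_WRITE 1u
#define IOC_READ  2u

/* Generic Linux ioctl encoding: dir<<30 | size<<16 | type<<8 | nr */
static unsigned int ioc(unsigned int dir, unsigned int type,
                        unsigned int nr, unsigned int size)
{
	return (dir << 30) | (size << 16) | (type << 8) | nr;
}

int main(void)
{
	/* TCGETA = _IOR('t', 23, struct termio): expect 0x80127417 */
	printf("TCGETA     = 0x%08x\n", ioc(IOC_READ, 't', 23, 18));
	/* TIOCSWINSZ = _IOW('t', 103, struct winsize): expect 0x40087467 */
	printf("TIOCSWINSZ = 0x%08x\n", ioc(IOC_WRITE, 't', 103, 8));
	/* TIOCGWINSZ = _IOR('t', 104, struct winsize): expect 0x80087468 */
	printf("TIOCGWINSZ = 0x%08x\n", ioc(IOC_READ, 't', 104, 8));
	return 0;
}

Compiling and running this on a host using the generic ioctl layout prints the same numeric values that the patch hard-codes, which is why the constants can safely replace the macro forms that dragged in struct definitions unavailable to userspace.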