Diffstat (limited to 'arch')
368 files changed, 11136 insertions, 22463 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index fe44b2494609..df94ac1f75b6 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -428,3 +428,4 @@ source "arch/arc/Kconfig.debug" source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" +source "kernel/power/Kconfig" diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 10bc3d4e8a44..db72fec0e160 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -12,7 +12,7 @@ ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := arc-linux-uclibc- endif -KBUILD_DEFCONFIG := fpga_defconfig +KBUILD_DEFCONFIG := nsim_700_defconfig cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts index cfaedd9c61c9..1c169dc74ad1 100644 --- a/arch/arc/boot/dts/nsimosci.dts +++ b/arch/arc/boot/dts/nsimosci.dts @@ -20,7 +20,7 @@ /* this is for console on PGU */ /* bootargs = "console=tty0 consoleblank=0"; */ /* this is for console on serial */ - bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; + bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; }; aliases { @@ -41,9 +41,9 @@ #interrupt-cells = <1>; }; - uart0: serial@c0000000 { + uart0: serial@f0000000 { compatible = "ns8250"; - reg = <0xc0000000 0x2000>; + reg = <0xf0000000 0x2000>; interrupts = <11>; clock-frequency = <3686400>; baud = <115200>; @@ -52,21 +52,21 @@ no-loopback-test = <1>; }; - pgu0: pgu@c9000000 { + pgu0: pgu@f9000000 { compatible = "snps,arcpgufb"; - reg = <0xc9000000 0x400>; + reg = <0xf9000000 0x400>; }; - ps2: ps2@c9001000 { + ps2: ps2@f9001000 { compatible = "snps,arc_ps2"; - reg = <0xc9000400 0x14>; + reg = <0xf9000400 0x14>; interrupts = <13>; interrupt-names = "arc_ps2_irq"; }; - eth0: ethernet@c0003000 { + eth0: ethernet@f0003000 { compatible = "snps,oscilan"; - reg = <0xc0003000 0x44>; + reg = <0xf0003000 0x44>; interrupts = <7>, <8>; interrupt-names = "rx", "tx"; }; diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig deleted file mode 100644 index 49c93011ab96..000000000000 --- a/arch/arc/configs/fpga_noramfs_defconfig +++ /dev/null @@ -1,63 +0,0 @@ -CONFIG_CROSS_COMPILE="arc-linux-uclibc-" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" -# CONFIG_SWAP is not set -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -# CONFIG_SLUB_DEBUG is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_KPROBES=y -CONFIG_MODULES=y -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -CONFIG_ARC_PLAT_FPGA_LEGACY=y -# CONFIG_ARC_HAS_RTSC is not set -CONFIG_ARC_BUILTIN_DTB_NAME="angel4" -CONFIG_PREEMPT=y -# CONFIG_COMPACTION is not set -# CONFIG_CROSS_MEMORY_ATTACH is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=y -CONFIG_NET_KEY=y -CONFIG_INET=y -# CONFIG_IPV6 is not set -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -# CONFIG_FIRMWARE_IN_KERNEL is not set -# CONFIG_BLK_DEV is not set -CONFIG_NETDEVICES=y -CONFIG_ARC_EMAC=y -CONFIG_LXT_PHY=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM 
is not set -CONFIG_SERIAL_ARC=y -CONFIG_SERIAL_ARC_CONSOLE=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set -# CONFIG_HID is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_TMPFS=y -# CONFIG_MISC_FILESYSTEMS is not set -CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_XZ_DEC=y diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/nsim_700_defconfig index ef4d3bc7b6c0..ef4d3bc7b6c0 100644 --- a/arch/arc/configs/fpga_defconfig +++ b/arch/arc/configs/nsim_700_defconfig diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index 742816f1b210..27ecc6975a58 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -41,6 +41,15 @@ /****************************************************************** * IRQ Control Macros + * + * All of them have "memory" clobber (compiler barrier) which is needed to + * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available) + * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register) + * + * Noted at the time of Abilis Timer List corruption + * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67 + * Reasoning : https://lkml.org/lkml/2013/4/8/15 + * ******************************************************************/ /* diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index d01df0c517a2..20ebb602ea2f 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -26,8 +26,10 @@ #include <asm/setup.h> #include <asm/mach_desc.h> +#ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; +#endif struct plat_smp_ops plat_smp_ops; diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 6a3d9a6c4497..91bd5bd62857 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -177,6 +177,9 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += kirkwood-b3.dtb \ dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb dtb-$(CONFIG_ARCH_MARCO) += marco-evb.dtb dtb-$(CONFIG_MACH_MESON6) += meson6-atv1200.dtb +dtb-$(CONFIG_ARCH_MMP) += pxa168-aspenite.dtb \ + pxa910-dkb.dtb \ + mmp2-brownstone.dtb dtb-$(CONFIG_ARCH_MOXART) += moxart-uc7112lx.dtb dtb-$(CONFIG_ARCH_MXC) += \ imx1-ads.dtb \ diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 87aa4f3b8b3d..53bbfc90b26a 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -100,7 +100,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "newhaven,nhd-4.3-480272ef-atxl", "panel-dpi"; label = "lcd"; pinctrl-names = "default"; @@ -112,11 +112,11 @@ clock-frequency = <9000000>; hactive = <480>; vactive = <272>; - hfront-porch = <8>; - hback-porch = <43>; - hsync-len = <4>; - vback-porch = <12>; - vfront-porch = <4>; + hfront-porch = <2>; + hback-porch = <2>; + hsync-len = <41>; + vfront-porch = <2>; + vback-porch = <2>; vsync-len = <10>; hsync-active = <0>; vsync-active = <0>; @@ -320,8 +320,7 @@ lcd_pins: lcd_pins { pinctrl-single,pins = < - /* GPIO 5_8 to select LCD / HDMI */ - 0x238 (PIN_OUTPUT_PULLUP | MUX_MODE7) + 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpcm_ad7.gpio1_7 */ >; }; }; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 9721e55384ce..50096d3427eb 100644 
--- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -14,6 +14,7 @@ #include "skeleton.dtsi" #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/phy/phy.h> #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) @@ -348,6 +349,12 @@ #clock-cells = <1>; }; + usbcluster: usb-cluster@18400 { + compatible = "marvell,armada-375-usb-cluster"; + reg = <0x18400 0x4>; + #phy-cells = <1>; + }; + mbusc: mbus-controller@20000 { compatible = "marvell,mbus-controller"; reg = <0x20000 0x100>, <0x20180 0x20>; @@ -398,6 +405,8 @@ reg = <0x50000 0x500>; interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gateclk 18>; + phys = <&usbcluster PHY_TYPE_USB2>; + phy-names = "usb"; status = "disabled"; }; @@ -414,6 +423,8 @@ reg = <0x58000 0x20000>,<0x5b880 0x80>; interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gateclk 16>; + phys = <&usbcluster PHY_TYPE_USB3>; + phy-names = "usb"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts index b5b84006469e..9198b719d0ef 100644 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts @@ -9,12 +9,12 @@ * licensing only applies to this file, and not this project as a * whole. * - * a) This library is free software; you can redistribute it and/or + * a) This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index cb100b03a362..dd1313cbc314 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -956,6 +956,14 @@ }; }; + rtc@fffffd20 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd20 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&clk32k>; + status = "disabled"; + }; + watchdog@fffffd40 { compatible = "atmel,at91sam9260-wdt"; reg = <0xfffffd40 0x10>; @@ -966,6 +974,12 @@ atmel,idle-halt; status = "disabled"; }; + + gpbr: syscon@fffffd50 { + compatible = "atmel,at91sam9260-gpbr", "syscon"; + reg = <0xfffffd50 0x10>; + status = "disabled"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index a81aab4281a7..cdb9ed612109 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -828,12 +828,26 @@ clocks = <&mck>; }; + rtc@fffffd20 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd20 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&slow_xtal>; + status = "disabled"; + }; + watchdog@fffffd40 { compatible = "atmel,at91sam9260-wdt"; reg = <0xfffffd40 0x10>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; status = "disabled"; }; + + gpbr: syscon@fffffd50 { + compatible = "atmel,at91sam9260-gpbr", "syscon"; + reg = <0xfffffd50 0x10>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 653e4395b7cb..1467750e3377 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -922,6 +922,27 @@ pinctrl-0 = <&pinctrl_can_rx_tx>; clocks = <&can_clk>; clock-names = "can_clk"; + }; + + rtc@fffffd20 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd20 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&slow_xtal>; + status = "disabled"; + }; + + rtc@fffffd50 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd50 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&slow_xtal>; + status = "disabled"; + }; + + gpbr: syscon@fffffd60 { + compatible = "atmel,at91sam9260-gpbr", "syscon"; + reg = <0xfffffd60 0x50>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index d2919108e92d..dfaacb113f2e 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -112,9 +112,23 @@ }; }; + shdwc@fffffd10 { + atmel,wakeup-counter = <10>; + atmel,wakeup-rtt-timer; + }; + + rtc@fffffd20 { + atmel,rtt-rtc-time-reg = <&gpbr 0x0>; + status = "okay"; + }; + watchdog@fffffd40 { status = "okay"; }; + + gpbr: syscon@fffffd50 { + status = "okay"; + }; }; nand0: nand@40000000 { diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 6c0637a4bda5..2a8da8a884b4 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -492,6 +492,27 @@ }; }; + isi { + pinctrl_isi: isi-0 { + atmel,pins = <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* D8 */ + AT91_PIOB 9 AT91_PERIPH_B AT91_PINCTRL_NONE /* D9 */ + AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* D10 */ + AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE /* D11 */ + AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* D0 */ + AT91_PIOB 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* D1 */ + AT91_PIOB 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* D2 */ + AT91_PIOB 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* D3 */ + AT91_PIOB 24 AT91_PERIPH_A 
AT91_PINCTRL_NONE /* D4 */ + AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* D5 */ + AT91_PIOB 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* D6 */ + AT91_PIOB 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* D7 */ + AT91_PIOB 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PCK */ + AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* VSYNC */ + AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE /* HSYNC */ + AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_NONE /* MCK */>; + }; + }; + usart0 { pinctrl_usart0: usart0-0 { atmel,pins = @@ -1035,6 +1056,17 @@ }; }; + isi@fffb4000 { + compatible = "atmel,at91sam9g45-isi"; + reg = <0xfffb4000 0x4000>; + interrupts = <26 IRQ_TYPE_LEVEL_HIGH 5>; + clocks = <&isi_clk>; + clock-names = "isi_clk"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_isi>; + status = "disabled"; + }; + pwm0: pwm@fffb8000 { compatible = "atmel,at91sam9rl-pwm"; reg = <0xfffb8000 0x300>; @@ -1199,12 +1231,26 @@ }; }; + rtc@fffffd20 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd20 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&clk32k>; + status = "disabled"; + }; + rtc@fffffdb0 { compatible = "atmel,at91rm9200-rtc"; reg = <0xfffffdb0 0x30>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; status = "disabled"; }; + + gpbr: syscon@fffffd60 { + compatible = "atmel,at91sam9260-gpbr", "syscon"; + reg = <0xfffffd60 0x10>; + status = "disabled"; + }; }; fb0: fb@0x00500000 { diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index d8dd22651090..33ce7ca2c404 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -161,6 +161,15 @@ pinctrl-0 = <&pinctrl_pwm_leds>; }; + rtc@fffffd20 { + atmel,rtt-rtc-time-reg = <&gpbr 0x0>; + status = "okay"; + }; + + gpbr: syscon@fffffd60 { + status = "okay"; + }; + rtc@fffffdb0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi index f0b4352650ed..72424371413e 100644 --- a/arch/arm/boot/dts/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/at91sam9rl.dtsi @@ -1059,6 +1059,27 @@ clocks = <&slow_rc_osc &slow_osc>; }; }; + + rtc@fffffeb0 { + compatible = "atmel,at91rm9200-rtc"; + reg = <0xfffffeb0 0x40>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + status = "disabled"; + }; + + rtc@fffffd20 { + compatible = "atmel,at91sam9260-rtt"; + reg = <0xfffffd20 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&clk32k>; + status = "disabled"; + }; + + gpbr: syscon@fffffd60 { + compatible = "atmel,at91sam9260-gpbr", "syscon"; + reg = <0xfffffd60 0x10>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 736092b1a535..10b725c7bfc0 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -304,7 +304,7 @@ /* VDD_GPU - over VDD_SMPS6 */ regulator-name = "smps6"; regulator-min-microvolt = <850000>; - regulator-max-microvolt = <12500000>; + regulator-max-microvolt = <1250000>; regulator-always-on; regulator-boot-on; }; @@ -313,7 +313,7 @@ /* CORE_VDD */ regulator-name = "smps7"; regulator-min-microvolt = <850000>; - regulator-max-microvolt = <1030000>; + regulator-max-microvolt = <1060000>; regulator-always-on; regulator-boot-on; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 63bf99be1762..22771bc1643a 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -742,7 +742,7 @@ }; wdt2: wdt@4ae14000 { - compatible = "ti,omap4-wdt"; + compatible = "ti,omap3-wdt"; reg = <0x4ae14000 0x80>; interrupts = <GIC_SPI 75 
IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "wd_timer2"; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index afc74fd4bb5e..89085d066c65 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -160,7 +160,7 @@ /* VDD_CORE */ regulator-name = "smps2"; regulator-min-microvolt = <850000>; - regulator-max-microvolt = <1030000>; + regulator-max-microvolt = <1060000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 2c05b3f017fa..4bdcbd61ce47 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -1042,7 +1042,7 @@ #clock-cells = <0>; compatible = "ti,mux-clock"; clocks = <&sys_clkin1>, <&sys_clkin2>; - reg = <0x01a4>; + reg = <0x0164>; }; mlb_clk: mlb_clk { @@ -1084,14 +1084,14 @@ #clock-cells = <0>; compatible = "ti,mux-clock"; clocks = <&sys_clkin1>, <&sys_clkin2>; - reg = <0x01d0>; + reg = <0x0168>; }; video2_dpll_clk_mux: video2_dpll_clk_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; clocks = <&sys_clkin1>, <&sys_clkin2>; - reg = <0x01d4>; + reg = <0x016c>; }; wkupaon_iclk_mux: wkupaon_iclk_mux { diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 242ddda0a8cd..22465494b796 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -311,12 +311,13 @@ adc: adc@126C0000 { compatible = "samsung,exynos3250-adc", "samsung,exynos-adc-v2"; - reg = <0x126C0000 0x100>, <0x10020718 0x4>; + reg = <0x126C0000 0x100>; interrupts = <0 137 0>; clock-names = "adc", "sclk"; clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index 2e9f1f7be77b..93b70402e943 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi @@ -108,13 +108,14 @@ adc: adc@126C0000 { compatible = "samsung,exynos-adc-v1"; - reg = <0x126C0000 0x100>, <0x10020718 0x4>; + reg = <0x126C0000 0x100>; interrupt-parent = <&combiner>; interrupts = <10 3>; clocks = <&clock CLK_TSADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index d45a07ea3402..0a229fcd7acf 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -754,12 +754,13 @@ adc: adc@12D10000 { compatible = "samsung,exynos-adc-v1"; - reg = <0x12D10000 0x100>, <0x10040718 0x4>; + reg = <0x12D10000 0x100>; interrupts = <0 106 0>; clocks = <&clock CLK_ADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 90bf4011e319..517e50f6760b 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -541,12 +541,13 @@ adc: adc@12D10000 { compatible = "samsung,exynos-adc-v2"; - reg = <0x12D10000 0x100>, <0x10040720 0x4>; + reg = <0x12D10000 0x100>; interrupts = <0 106 0>; clocks = <&clock CLK_TSADC>; clock-names = "adc"; #io-channel-cells = <1>; io-channel-ranges; + samsung,syscon-phandle = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts 
b/arch/arm/boot/dts/mmp2-brownstone.dts index 7f70a39459f6..350208c5e1ed 100644 --- a/arch/arm/boot/dts/mmp2-brownstone.dts +++ b/arch/arm/boot/dts/mmp2-brownstone.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "mmp2.dtsi" +#include "mmp2.dtsi" / { model = "Marvell MMP2 Brownstone Development Board"; diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi index 4e8b08c628c7..766bbb8495b6 100644 --- a/arch/arm/boot/dts/mmp2.dtsi +++ b/arch/arm/boot/dts/mmp2.dtsi @@ -7,7 +7,8 @@ * publishhed by the Free Software Foundation. */ -/include/ "skeleton.dtsi" +#include "skeleton.dtsi" +#include <dt-bindings/clock/marvell,mmp2.h> / { aliases { @@ -135,6 +136,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4030000 0x1000>; interrupts = <27>; + clocks = <&soc_clocks MMP2_CLK_UART0>; + resets = <&soc_clocks MMP2_CLK_UART0>; status = "disabled"; }; @@ -142,6 +145,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4017000 0x1000>; interrupts = <28>; + clocks = <&soc_clocks MMP2_CLK_UART1>; + resets = <&soc_clocks MMP2_CLK_UART1>; status = "disabled"; }; @@ -149,6 +154,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4018000 0x1000>; interrupts = <24>; + clocks = <&soc_clocks MMP2_CLK_UART2>; + resets = <&soc_clocks MMP2_CLK_UART2>; status = "disabled"; }; @@ -156,6 +163,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4016000 0x1000>; interrupts = <46>; + clocks = <&soc_clocks MMP2_CLK_UART3>; + resets = <&soc_clocks MMP2_CLK_UART3>; status = "disabled"; }; @@ -168,6 +177,8 @@ #gpio-cells = <2>; interrupts = <49>; interrupt-names = "gpio_mux"; + clocks = <&soc_clocks MMP2_CLK_GPIO>; + resets = <&soc_clocks MMP2_CLK_GPIO>; interrupt-controller; #interrupt-cells = <1>; ranges; @@ -201,6 +212,8 @@ compatible = "mrvl,mmp-twsi"; reg = <0xd4011000 0x1000>; interrupts = <7>; + clocks = <&soc_clocks MMP2_CLK_TWSI0>; + resets = <&soc_clocks MMP2_CLK_TWSI0>; #address-cells = <1>; #size-cells = <0>; mrvl,i2c-fast-mode; @@ -211,6 +224,8 @@ compatible = "mrvl,mmp-twsi"; reg = <0xd4025000 0x1000>; interrupts = <58>; + clocks = <&soc_clocks MMP2_CLK_TWSI1>; + resets = <&soc_clocks MMP2_CLK_TWSI1>; status = "disabled"; }; @@ -220,8 +235,20 @@ interrupts = <1 0>; interrupt-names = "rtc 1Hz", "rtc alarm"; interrupt-parent = <&intcmux5>; + clocks = <&soc_clocks MMP2_CLK_RTC>; + resets = <&soc_clocks MMP2_CLK_RTC>; status = "disabled"; }; }; + + soc_clocks: clocks{ + compatible = "marvell,mmp2-clock"; + reg = <0xd4050000 0x1000>, + <0xd4282800 0x400>, + <0xd4015000 0x1000>; + reg-names = "mpmu", "apmu", "apbc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; }; }; diff --git a/arch/arm/boot/dts/omap2430-sdp.dts b/arch/arm/boot/dts/omap2430-sdp.dts index 05eca2e4430f..6b36ede58488 100644 --- a/arch/arm/boot/dts/omap2430-sdp.dts +++ b/arch/arm/boot/dts/omap2430-sdp.dts @@ -48,22 +48,22 @@ gpmc,device-width = <1>; gpmc,cycle2cycle-samecsen = <1>; gpmc,cycle2cycle-diffcsen = <1>; - gpmc,cs-on-ns = <7>; - gpmc,cs-rd-off-ns = <233>; - gpmc,cs-wr-off-ns = <233>; - gpmc,adv-on-ns = <22>; - gpmc,adv-rd-off-ns = <60>; - gpmc,adv-wr-off-ns = <60>; - gpmc,oe-on-ns = <67>; - gpmc,oe-off-ns = <210>; - gpmc,we-on-ns = <67>; - gpmc,we-off-ns = <210>; - gpmc,rd-cycle-ns = <233>; - gpmc,wr-cycle-ns = <233>; - gpmc,access-ns = <233>; - gpmc,page-burst-access-ns = <30>; - gpmc,bus-turnaround-ns = <30>; - gpmc,cycle2cycle-delay-ns = <30>; + gpmc,cs-on-ns = <6>; + gpmc,cs-rd-off-ns = <187>; + gpmc,cs-wr-off-ns = <187>; + gpmc,adv-on-ns = <18>; + gpmc,adv-rd-off-ns = <48>; + gpmc,adv-wr-off-ns = <48>; + gpmc,oe-on-ns = <60>; + gpmc,oe-off-ns = 
<169>; + gpmc,we-on-ns = <66>; + gpmc,we-off-ns = <169>; + gpmc,rd-cycle-ns = <187>; + gpmc,wr-cycle-ns = <187>; + gpmc,access-ns = <187>; + gpmc,page-burst-access-ns = <24>; + gpmc,bus-turnaround-ns = <24>; + gpmc,cycle2cycle-delay-ns = <24>; gpmc,wait-monitoring-ns = <0>; gpmc,clk-activation-ns = <0>; gpmc,wr-data-mux-bus-ns = <0>; diff --git a/arch/arm/boot/dts/pxa168-aspenite.dts b/arch/arm/boot/dts/pxa168-aspenite.dts index e762facb3fa4..0a988b3fb248 100644 --- a/arch/arm/boot/dts/pxa168-aspenite.dts +++ b/arch/arm/boot/dts/pxa168-aspenite.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "pxa168.dtsi" +#include "pxa168.dtsi" / { model = "Marvell PXA168 Aspenite Development Board"; diff --git a/arch/arm/boot/dts/pxa168.dtsi b/arch/arm/boot/dts/pxa168.dtsi index 975dad21ac38..b899e25cbb1b 100644 --- a/arch/arm/boot/dts/pxa168.dtsi +++ b/arch/arm/boot/dts/pxa168.dtsi @@ -7,7 +7,8 @@ * publishhed by the Free Software Foundation. */ -/include/ "skeleton.dtsi" +#include "skeleton.dtsi" +#include <dt-bindings/clock/marvell,pxa168.h> / { aliases { @@ -59,6 +60,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4017000 0x1000>; interrupts = <27>; + clocks = <&soc_clocks PXA168_CLK_UART0>; + resets = <&soc_clocks PXA168_CLK_UART0>; status = "disabled"; }; @@ -66,6 +69,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4018000 0x1000>; interrupts = <28>; + clocks = <&soc_clocks PXA168_CLK_UART1>; + resets = <&soc_clocks PXA168_CLK_UART1>; status = "disabled"; }; @@ -73,6 +78,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4026000 0x1000>; interrupts = <29>; + clocks = <&soc_clocks PXA168_CLK_UART2>; + resets = <&soc_clocks PXA168_CLK_UART2>; status = "disabled"; }; @@ -84,6 +91,8 @@ gpio-controller; #gpio-cells = <2>; interrupts = <49>; + clocks = <&soc_clocks PXA168_CLK_GPIO>; + resets = <&soc_clocks PXA168_CLK_GPIO>; interrupt-names = "gpio_mux"; interrupt-controller; #interrupt-cells = <1>; @@ -110,6 +119,8 @@ compatible = "mrvl,mmp-twsi"; reg = <0xd4011000 0x1000>; interrupts = <7>; + clocks = <&soc_clocks PXA168_CLK_TWSI0>; + resets = <&soc_clocks PXA168_CLK_TWSI0>; mrvl,i2c-fast-mode; status = "disabled"; }; @@ -118,6 +129,8 @@ compatible = "mrvl,mmp-twsi"; reg = <0xd4025000 0x1000>; interrupts = <58>; + clocks = <&soc_clocks PXA168_CLK_TWSI1>; + resets = <&soc_clocks PXA168_CLK_TWSI1>; status = "disabled"; }; @@ -126,8 +139,20 @@ reg = <0xd4010000 0x1000>; interrupts = <5 6>; interrupt-names = "rtc 1Hz", "rtc alarm"; + clocks = <&soc_clocks PXA168_CLK_RTC>; + resets = <&soc_clocks PXA168_CLK_RTC>; status = "disabled"; }; }; + + soc_clocks: clocks{ + compatible = "marvell,pxa168-clock"; + reg = <0xd4050000 0x1000>, + <0xd4282800 0x400>, + <0xd4015000 0x1000>; + reg-names = "mpmu", "apmu", "apbc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; }; }; diff --git a/arch/arm/boot/dts/pxa910-dkb.dts b/arch/arm/boot/dts/pxa910-dkb.dts index 595492aa5053..c82f2810ec73 100644 --- a/arch/arm/boot/dts/pxa910-dkb.dts +++ b/arch/arm/boot/dts/pxa910-dkb.dts @@ -8,7 +8,7 @@ */ /dts-v1/; -/include/ "pxa910.dtsi" +#include "pxa910.dtsi" / { model = "Marvell PXA910 DKB Development Board"; diff --git a/arch/arm/boot/dts/pxa910.dtsi b/arch/arm/boot/dts/pxa910.dtsi index 0247c622f580..0868f6729be1 100644 --- a/arch/arm/boot/dts/pxa910.dtsi +++ b/arch/arm/boot/dts/pxa910.dtsi @@ -7,7 +7,8 @@ * publishhed by the Free Software Foundation. 
*/ -/include/ "skeleton.dtsi" +#include "skeleton.dtsi" +#include <dt-bindings/clock/marvell,pxa910.h> / { aliases { @@ -71,6 +72,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4017000 0x1000>; interrupts = <27>; + clocks = <&soc_clocks PXA910_CLK_UART0>; + resets = <&soc_clocks PXA910_CLK_UART0>; status = "disabled"; }; @@ -78,6 +81,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4018000 0x1000>; interrupts = <28>; + clocks = <&soc_clocks PXA910_CLK_UART1>; + resets = <&soc_clocks PXA910_CLK_UART1>; status = "disabled"; }; @@ -85,6 +90,8 @@ compatible = "mrvl,mmp-uart"; reg = <0xd4036000 0x1000>; interrupts = <59>; + clocks = <&soc_clocks PXA910_CLK_UART2>; + resets = <&soc_clocks PXA910_CLK_UART2>; status = "disabled"; }; @@ -97,6 +104,8 @@ #gpio-cells = <2>; interrupts = <49>; interrupt-names = "gpio_mux"; + clocks = <&soc_clocks PXA910_CLK_GPIO>; + resets = <&soc_clocks PXA910_CLK_GPIO>; interrupt-controller; #interrupt-cells = <1>; ranges; @@ -124,6 +133,8 @@ #size-cells = <0>; reg = <0xd4011000 0x1000>; interrupts = <7>; + clocks = <&soc_clocks PXA910_CLK_TWSI0>; + resets = <&soc_clocks PXA910_CLK_TWSI0>; mrvl,i2c-fast-mode; status = "disabled"; }; @@ -134,6 +145,8 @@ #size-cells = <0>; reg = <0xd4037000 0x1000>; interrupts = <54>; + clocks = <&soc_clocks PXA910_CLK_TWSI1>; + resets = <&soc_clocks PXA910_CLK_TWSI1>; status = "disabled"; }; @@ -142,8 +155,21 @@ reg = <0xd4010000 0x1000>; interrupts = <5 6>; interrupt-names = "rtc 1Hz", "rtc alarm"; + clocks = <&soc_clocks PXA910_CLK_RTC>; + resets = <&soc_clocks PXA910_CLK_RTC>; status = "disabled"; }; }; + + soc_clocks: clocks{ + compatible = "marvell,pxa910-clock"; + reg = <0xd4050000 0x1000>, + <0xd4282800 0x400>, + <0xd4015000 0x1000>, + <0xd403b000 0x1000>; + reg-names = "mpmu", "apmu", "apbc", "apbcp"; + #clock-cells = <1>; + #reset-cells = <1>; + }; }; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index e0157b0f075c..1b0f30c2c4a5 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -9,12 +9,12 @@ * licensing only applies to this file, and not this project as a * whole. * - * a) This library is free software; you can redistribute it and/or + * a) This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
@@ -45,6 +45,7 @@ #include "skeleton.dtsi" #include <dt-bindings/clock/at91.h> +#include <dt-bindings/dma/at91.h> #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/gpio/gpio.h> @@ -302,6 +303,15 @@ #size-cells = <1>; ranges; + dma1: dma-controller@f0004000 { + compatible = "atmel,sama5d4-dma"; + reg = <0xf0004000 0x200>; + interrupts = <50 IRQ_TYPE_LEVEL_HIGH 0>; + #dma-cells = <1>; + clocks = <&dma1_clk>; + clock-names = "dma_clk"; + }; + ramc0: ramc@f0010000 { compatible = "atmel,sama5d3-ddramc"; reg = <0xf0010000 0x200>; @@ -309,6 +319,15 @@ clock-names = "ddrck", "mpddr"; }; + dma0: dma-controller@f0014000 { + compatible = "atmel,sama5d4-dma"; + reg = <0xf0014000 0x200>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH 0>; + #dma-cells = <1>; + clocks = <&dma0_clk>; + clock-names = "dma_clk"; + }; + pmc: pmc@f0018000 { compatible = "atmel,sama5d3-pmc"; reg = <0xf0018000 0x120>; @@ -761,6 +780,10 @@ compatible = "atmel,hsmci"; reg = <0xf8000000 0x600>; interrupts = <35 IRQ_TYPE_LEVEL_HIGH 0>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(0))>; + dma-names = "rxtx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3>; status = "disabled"; @@ -776,6 +799,13 @@ compatible = "atmel,at91rm9200-spi"; reg = <0xf8010000 0x100>; interrupts = <37 IRQ_TYPE_LEVEL_HIGH 3>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(10))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(11))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi0>; clocks = <&spi0_clk>; @@ -787,6 +817,13 @@ compatible = "atmel,at91sam9x5-i2c"; reg = <0xf8014000 0x4000>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 6>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(2))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(3))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c0>; #address-cells = <1>; @@ -817,7 +854,14 @@ i2c2: i2c@f8024000 { compatible = "atmel,at91sam9x5-i2c"; reg = <0xf8024000 0x4000>; - interrupts = <34 4 6>; + interrupts = <34 IRQ_TYPE_LEVEL_HIGH 6>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(6))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(7))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; #address-cells = <1>; @@ -830,6 +874,10 @@ compatible = "atmel,hsmci"; reg = <0xfc000000 0x600>; interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(1))>; + dma-names = "rxtx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>; status = "disabled"; @@ -843,6 +891,13 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xfc008000 0x100>; interrupts = <29 IRQ_TYPE_LEVEL_HIGH 5>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(16))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(17))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart2 &pinctrl_usart2_rts &pinctrl_usart2_cts>; clocks = <&usart2_clk>; @@ -854,6 +909,13 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xfc00c000 0x100>; interrupts = <30 IRQ_TYPE_LEVEL_HIGH 5>; + 
dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(18))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(19))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart3>; clocks = <&usart3_clk>; @@ -865,6 +927,13 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xfc010000 0x100>; interrupts = <31 IRQ_TYPE_LEVEL_HIGH 5>; + dmas = <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(20))>, + <&dma1 + (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) + | AT91_XDMAC_DT_PERID(21))>; + dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart4>; clocks = <&usart4_clk>; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index e3ab942fd148..7b4099fcf817 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -188,19 +188,11 @@ "apb0_ir1", "apb0_keypad"; }; - apb1_mux: apb1_mux@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1_mux"; - }; - - apb1: apb1@01c20058 { + apb1: clk@01c20058 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; - clocks = <&apb1_mux>; + clocks = <&osc24M>, <&pll6 1>, <&osc32k>; clock-output-names = "apb1"; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 81ad4b94e812..1b76667f3182 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -176,19 +176,11 @@ "apb0_ir", "apb0_keypad"; }; - apb1_mux: apb1_mux@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1_mux"; - }; - - apb1: apb1@01c20058 { + apb1: clk@01c20058 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; - clocks = <&apb1_mux>; + clocks = <&osc24M>, <&pll6 1>, <&osc32k>; clock-output-names = "apb1"; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index b131068f4f35..c35217ea1f64 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -161,19 +161,11 @@ clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir"; }; - apb1_mux: apb1_mux@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1_mux"; - }; - - apb1: apb1@01c20058 { + apb1: clk@01c20058 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; - clocks = <&apb1_mux>; + clocks = <&osc24M>, <&pll6 1>, <&osc32k>; clock-output-names = "apb1"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index a400172a8a52..f47156b6572b 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -229,19 +229,11 @@ "apb1_daudio1"; }; - apb2_mux: apb2_mux@01c20058 { + apb2: clk@01c20058 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; + compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>; - clock-output-names = "apb2_mux"; - }; - - apb2: apb2@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-apb2-div-clk"; - reg = <0x01c20058 0x4>; - clocks = <&apb2_mux>; 
clock-output-names = "apb2"; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 82a524ce28ad..e21ce5992d56 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -236,19 +236,11 @@ "apb0_iis2", "apb0_keypad"; }; - apb1_mux: apb1_mux@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; - reg = <0x01c20058 0x4>; - clocks = <&osc24M>, <&pll6 1>, <&osc32k>; - clock-output-names = "apb1_mux"; - }; - - apb1: apb1@01c20058 { + apb1: clk@01c20058 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; - clocks = <&apb1_mux>; + clocks = <&osc24M>, <&pll6 1>, <&osc32k>; clock-output-names = "apb1"; }; diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 6086adbf9d74..0746cd1024d7 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -189,19 +189,11 @@ "apb1_daudio0", "apb1_daudio1"; }; - apb2_mux: apb2_mux_clk@01c20058 { + apb2: clk@01c20058 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-apb1-mux-clk"; + compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>; - clock-output-names = "apb2_mux"; - }; - - apb2: apb2_clk@01c20058 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-apb2-div-clk"; - reg = <0x01c20058 0x4>; - clocks = <&apb2_mux>; clock-output-names = "apb2"; }; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 222f3b3f4dd5..4296b5398bf5 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -1,5 +1,6 @@ #include <dt-bindings/clock/tegra114-car.h> #include <dt-bindings/gpio/tegra-gpio.h> +#include <dt-bindings/memory/tegra114-mc.h> #include <dt-bindings/pinctrl/pinctrl-tegra.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -50,6 +51,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DC>; + nvidia,head = <0>; rgb { @@ -67,6 +70,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DCB>; + nvidia,head = <1>; rgb { @@ -498,15 +503,15 @@ reset-names = "fuse"; }; - iommu@70019010 { - compatible = "nvidia,tegra114-smmu", "nvidia,tegra30-smmu"; - reg = <0x70019010 0x02c - 0x700191f0 0x010 - 0x70019228 0x074>; - nvidia,#asids = <4>; - dma-window = <0 0x40000000>; - nvidia,swgroups = <0x18659fe>; - nvidia,ahb = <&ahb>; + mc: memory-controller@70019000 { + compatible = "nvidia,tegra114-mc"; + reg = <0x70019000 0x1000>; + clocks = <&tegra_car TEGRA114_CLK_MC>; + clock-names = "mc"; + + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + + #iommu-cells = <1>; }; ahub@70080000 { diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index 51b373ff1065..4eb540be368f 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -1942,4 +1942,48 @@ <&tegra_car TEGRA124_CLK_EXTERN1>; clock-names = "pll_a", "pll_a_out0", "mclk"; }; + + thermal-zones { + cpu { + trips { + trip@0 { + temperature = <101000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + /* There are currently no cooling maps because there are no cooling devices */ + }; + }; + + mem { + trips { + trip@0 { + temperature = <101000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + /* There are currently no cooling maps because there are no cooling devices */ + }; + }; + + gpu { + trips { + trip@0 { + 
temperature = <101000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + /* There are currently no cooling maps because there are no cooling devices */ + }; + }; + }; }; diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index df2b06b29985..4be06c6ea0c8 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -1,8 +1,10 @@ #include <dt-bindings/clock/tegra124-car.h> #include <dt-bindings/gpio/tegra-gpio.h> +#include <dt-bindings/memory/tegra124-mc.h> #include <dt-bindings/pinctrl/pinctrl-tegra.h> #include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h> #include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/thermal/tegra124-soctherm.h> #include "skeleton.dtsi" @@ -102,6 +104,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DC>; + nvidia,head = <0>; }; @@ -115,6 +119,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DCB>; + nvidia,head = <1>; }; @@ -275,7 +281,8 @@ pinmux: pinmux@0,70000868 { compatible = "nvidia,tegra124-pinmux"; reg = <0x0 0x70000868 0x0 0x164>, /* Pad control registers */ - <0x0 0x70003000 0x0 0x434>; /* Mux registers */ + <0x0 0x70003000 0x0 0x434>, /* Mux registers */ + <0x0 0x70000820 0x0 0x008>; /* MIPI pad control */ }; /* @@ -551,6 +558,17 @@ reset-names = "fuse"; }; + mc: memory-controller@0,70019000 { + compatible = "nvidia,tegra124-mc"; + reg = <0x0 0x70019000 0x0 0x1000>; + clocks = <&tegra_car TEGRA124_CLK_MC>; + clock-names = "mc"; + + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + + #iommu-cells = <1>; + }; + sata@0,70020000 { compatible = "nvidia,tegra124-ahci"; @@ -640,6 +658,18 @@ status = "disabled"; }; + soctherm: thermal-sensor@0,700e2000 { + compatible = "nvidia,tegra124-soctherm"; + reg = <0x0 0x700e2000 0x0 0x1000>; + interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&tegra_car TEGRA124_CLK_TSENSOR>, + <&tegra_car TEGRA124_CLK_SOC_THERM>; + clock-names = "tsensor", "soctherm"; + resets = <&tegra_car 78>; + reset-names = "soctherm"; + #thermal-sensor-cells = <1>; + }; + ahub@0,70300000 { compatible = "nvidia,tegra124-ahub"; reg = <0x0 0x70300000 0x0 0x200>, @@ -881,6 +911,40 @@ }; }; + thermal-zones { + cpu { + polling-delay-passive = <1000>; + polling-delay = <1000>; + + thermal-sensors = + <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>; + }; + + mem { + polling-delay-passive = <1000>; + polling-delay = <1000>; + + thermal-sensors = + <&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>; + }; + + gpu { + polling-delay-passive = <1000>; + polling-delay = <1000>; + + thermal-sensors = + <&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>; + }; + + pllx { + polling-delay-passive = <1000>; + polling-delay = <1000>; + + thermal-sensors = + <&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>; + }; + }; + timer { compatible = "arm,armv7-timer"; interrupts = <GIC_PPI 13 diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index b270b9e3d455..99475f6e76a3 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -1,5 +1,6 @@ #include <dt-bindings/clock/tegra30-car.h> #include <dt-bindings/gpio/tegra-gpio.h> +#include <dt-bindings/memory/tegra30-mc.h> #include <dt-bindings/pinctrl/pinctrl-tegra.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -166,6 +167,8 @@ resets = <&tegra_car 27>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DC>; + nvidia,head = <0>; rgb { @@ -183,6 +186,8 @@ resets = <&tegra_car 26>; reset-names = "dc"; + iommus = <&mc TEGRA_SWGROUP_DCB>; + 
nvidia,head = <1>; rgb { @@ -615,23 +620,15 @@ clock-names = "pclk", "clk32k_in"; }; - memory-controller@7000f000 { + mc: memory-controller@7000f000 { compatible = "nvidia,tegra30-mc"; - reg = <0x7000f000 0x010 - 0x7000f03c 0x1b4 - 0x7000f200 0x028 - 0x7000f284 0x17c>; + reg = <0x7000f000 0x400>; + clocks = <&tegra_car TEGRA30_CLK_MC>; + clock-names = "mc"; + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; - }; - iommu@7000f010 { - compatible = "nvidia,tegra30-smmu"; - reg = <0x7000f010 0x02c - 0x7000f1f0 0x010 - 0x7000f228 0x05c>; - nvidia,#asids = <4>; /* # of ASIDs */ - dma-window = <0 0x40000000>; /* IOVA start & length */ - nvidia,ahb = <&ahb>; + #iommu-cells = <1>; }; fuse@7000f800 { diff --git a/arch/arm/configs/ape6evm_defconfig b/arch/arm/configs/ape6evm_defconfig index db81d8ce4c03..9e9a72e3d30f 100644 --- a/arch/arm/configs/ape6evm_defconfig +++ b/arch/arm/configs/ape6evm_defconfig @@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_BINFMT_MISC=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig index d9675c68a399..5666e3700a82 100644 --- a/arch/arm/configs/armadillo800eva_defconfig +++ b/arch/arm/configs/armadillo800eva_defconfig @@ -43,7 +43,7 @@ CONFIG_KEXEC=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig index 83a87e48901c..7117662bab2e 100644 --- a/arch/arm/configs/bcm_defconfig +++ b/arch/arm/configs/bcm_defconfig @@ -39,7 +39,7 @@ CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=y diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig index 1dde5daa84f9..3125e00f05ab 100644 --- a/arch/arm/configs/bockw_defconfig +++ b/arch/arm/configs/bockw_defconfig @@ -29,7 +29,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_ARM_APPENDED_DTB=y CONFIG_VFP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index 759f9b0053e2..235842c9ba96 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -49,7 +49,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=m CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_ONDEMAND=m CONFIG_CPU_IDLE=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index c41990729024..5ef14de00a29 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -27,7 +27,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M" CONFIG_VFP=y CONFIG_NEON=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index eb440aae4283..ea316c4b890e 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig @@ -39,7 +39,6 @@ CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m CONFIG_PM=y CONFIG_APM_EMULATION=y -CONFIG_PM_RUNTIME=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff 
--git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig index 1fe3621faf65..112543665dd7 100644 --- a/arch/arm/configs/hisi_defconfig +++ b/arch/arm/configs/hisi_defconfig @@ -18,7 +18,7 @@ CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_NEON=y CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig index 182e54692664..18e59feaa307 100644 --- a/arch/arm/configs/imote2_defconfig +++ b/arch/arm/configs/imote2_defconfig @@ -31,7 +31,6 @@ CONFIG_BINFMT_AOUT=m CONFIG_BINFMT_MISC=m CONFIG_PM=y CONFIG_APM_EMULATION=y -CONFIG_PM_RUNTIME=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index f707cd2691cf..7c2075a07eba 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -54,7 +54,7 @@ CONFIG_ARM_IMX6Q_CPUFREQ=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_BINFMT_MISC=m -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_PM_DEBUG=y CONFIG_PM_TEST_SUSPEND=y CONFIG_NET=y diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig index 20a3ff99fae2..a2067cbfe173 100644 --- a/arch/arm/configs/keystone_defconfig +++ b/arch/arm/configs/keystone_defconfig @@ -30,7 +30,7 @@ CONFIG_HIGHMEM=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_SUSPEND is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig index 8cb115d74fdf..5d63fc5d2d48 100644 --- a/arch/arm/configs/kzm9g_defconfig +++ b/arch/arm/configs/kzm9g_defconfig @@ -43,7 +43,7 @@ CONFIG_KEXEC=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig index 929c571ea29b..a82afc916a89 100644 --- a/arch/arm/configs/lager_defconfig +++ b/arch/arm/configs/lager_defconfig @@ -37,7 +37,7 @@ CONFIG_AUTO_ZRELADDR=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig index 57ececba2ae6..05a529311b4d 100644 --- a/arch/arm/configs/mackerel_defconfig +++ b/arch/arm/configs/mackerel_defconfig @@ -28,7 +28,6 @@ CONFIG_KEXEC=y CONFIG_VFP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_PM=y -CONFIG_PM_RUNTIME=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig index ff91630d34e1..3c8b6d823189 100644 --- a/arch/arm/configs/marzen_defconfig +++ b/arch/arm/configs/marzen_defconfig @@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y CONFIG_VFP=y CONFIG_KEXEC=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index d7896580f3bb..2328fe752e9c 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -479,4 +479,4 @@ CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOCKUP_DETECTOR=y CONFIG_CRYPTO_DEV_TEGRA_AES=y -CONFIG_GENERIC_CPUFREQ_CPU0=y +CONFIG_CPUFREQ_DT=y diff --git a/arch/arm/configs/omap1_defconfig 
b/arch/arm/configs/omap1_defconfig index 115cda9f3260..a7dce674f1be 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -63,7 +63,6 @@ CONFIG_FPE_NWFPE=y CONFIG_BINFMT_MISC=y CONFIG_PM=y # CONFIG_SUSPEND is not set -CONFIG_PM_RUNTIME=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 3e09286f7ff1..c2c3a852af9f 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -127,6 +127,8 @@ CONFIG_SRAM=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_ATA=y +CONFIG_SATA_AHCI_PLATFORM=y CONFIG_MD=y CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_ARC is not set diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig index 23591dba47a0..f610230b9c1f 100644 --- a/arch/arm/configs/prima2_defconfig +++ b/arch/arm/configs/prima2_defconfig @@ -18,7 +18,7 @@ CONFIG_PREEMPT=y CONFIG_AEABI=y CONFIG_KEXEC=y CONFIG_BINFMT_MISC=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index b58fb32770a0..afa24799477a 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -32,7 +32,7 @@ CONFIG_VFP=y CONFIG_NEON=y CONFIG_KERNEL_MODE_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_PM_DEBUG=y CONFIG_PM_ADVANCED_DEBUG=y CONFIG_NET=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 63fb5316ff02..3df6ca0c1d1f 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -39,7 +39,7 @@ CONFIG_KEXEC=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -146,7 +146,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_S35390A=y CONFIG_DMADEVICES=y CONFIG_SH_DMAE=y -CONFIG_RCAR_AUDMAC_PP=y CONFIG_RCAR_DMAC=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_PWM=y @@ -178,5 +177,5 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_THERMAL=y -CONFIG_GENERIC_CPUFREQ_CPU0=y +CONFIG_CPUFREQ_DT=y CONFIG_REGULATOR_DA9210=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index f7ac0379850f..7a342d2780a8 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -11,7 +11,7 @@ CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_VFP=y CONFIG_NEON=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index 40750f93aa83..3ea9c3377ccb 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -46,7 +46,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_IDLE=y CONFIG_VFP=y CONFIG_NEON=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index d219d6a43238..6a1c9898fd03 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -25,7 +25,7 @@ CONFIG_CPU_IDLE=y CONFIG_ARM_U8500_CPUIDLE=y CONFIG_VFP=y CONFIG_NEON=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig 
b/arch/arm/configs/vt8500_v6_v7_defconfig index 9e7a25639690..1bfaa7bfc392 100644 --- a/arch/arm/configs/vt8500_v6_v7_defconfig +++ b/arch/arm/configs/vt8500_v6_v7_defconfig @@ -16,7 +16,7 @@ CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_VFP=y CONFIG_NEON=y -CONFIG_PM_RUNTIME=y +CONFIG_PM=y CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index e6e3446abdf6..b52101d37ec7 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -121,13 +121,12 @@ static inline unsigned long dma_max_pfn(struct device *dev) } #define dma_max_pfn(dev) dma_max_pfn(dev) -static inline int set_arch_dma_coherent_ops(struct device *dev) -{ - dev->archdata.dma_coherent = true; - set_dma_ops(dev, &arm_coherent_dma_ops); - return 0; -} -#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) +#define arch_setup_dma_ops arch_setup_dma_ops +extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + struct iommu_ops *iommu, bool coherent); + +#define arch_teardown_dma_ops arch_teardown_dma_ops +extern void arch_teardown_dma_ops(struct device *dev); /* do not use this function in a driver */ static inline bool is_device_dma_coherent(struct device *dev) diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index b9db269c6e61..66ce17655bb9 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr = HCR_GUEST_MASK; +} + static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) { return 1; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 53036e21756b..254e0650e48b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -150,8 +150,6 @@ struct kvm_vcpu_stat { u32 halt_wakeup; }; -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init); int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index acb0d5712716..63e0ecc04901 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_boot_hyp_pgd(void); void free_hyp_pgds(void); +void stage2_unmap_vm(struct kvm *kvm); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, @@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) } static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, - unsigned long size) + unsigned long size, + bool ipa_uncached) { - if (!vcpu_has_cache_enabled(vcpu)) + if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) kvm_flush_dcache_to_poc((void *)hva, size); /* diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index ac4bfae26702..0fa418463f49 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -120,12 +120,12 @@ static inline int 
arch_spin_value_unlocked(arch_spinlock_t lock) static inline int arch_spin_is_locked(arch_spinlock_t *lock) { - return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); + return !arch_spin_value_unlocked(READ_ONCE(*lock)); } static inline int arch_spin_is_contended(arch_spinlock_t *lock) { - struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); + struct __raw_tickets tickets = READ_ONCE(lock->tickets); return (tickets.next - tickets.owner) > 1; } #define arch_spin_is_contended arch_spin_is_contended diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index e34934f63a49..f7c65adaa428 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -484,7 +484,7 @@ static void armpmu_disable(struct pmu *pmu) armpmu->stop(armpmu); } -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int armpmu_runtime_resume(struct device *dev) { struct arm_pmu_platdata *plat = dev_get_platdata(dev); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 8361652b6dab..f9c863911038 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -18,6 +18,7 @@ #include <linux/bootmem.h> #include <linux/seq_file.h> #include <linux/screen_info.h> +#include <linux/of_iommu.h> #include <linux/of_platform.h> #include <linux/init.h> #include <linux/kexec.h> @@ -806,6 +807,7 @@ static int __init customize_machine(void) * machine from the device tree, if no callback is provided, * otherwise we would always need an init_machine callback. */ + of_iommu_init(); if (machine_desc->init_machine) machine_desc->init_machine(); #ifdef CONFIG_OF diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 9e193c8a959e..2d6d91001062 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) int err; struct kvm_vcpu *vcpu; + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { + err = -EBUSY; + goto out; + } + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) { err = -ENOMEM; @@ -263,6 +268,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { /* Force users to call KVM_ARM_VCPU_INIT */ vcpu->arch.target = -1; + bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); /* Set up the timer */ kvm_timer_vcpu_init(vcpu); @@ -419,6 +425,7 @@ static void update_vttbr(struct kvm *kvm) static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; int ret; if (likely(vcpu->arch.has_run_once)) @@ -427,15 +434,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) vcpu->arch.has_run_once = true; /* - * Initialize the VGIC before running a vcpu the first time on - * this VM. + * Map the VGIC hardware resources before running a vcpu the first + * time on this VM. */ - if (unlikely(!vgic_initialized(vcpu->kvm))) { - ret = kvm_vgic_init(vcpu->kvm); + if (unlikely(!vgic_ready(kvm))) { + ret = kvm_vgic_map_resources(kvm); if (ret) return ret; } + /* + * Enable the arch timers only if we have an in-kernel VGIC + * and it has been properly initialized, since we cannot handle + * interrupts from the virtual timer with a userspace gic. 
+ */ + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) + kvm_timer_enable(kvm); + return 0; } @@ -649,6 +664,48 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, return -EINVAL; } +static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, + const struct kvm_vcpu_init *init) +{ + unsigned int i; + int phys_target = kvm_target_cpu(); + + if (init->target != phys_target) + return -EINVAL; + + /* + * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must + * use the same target. + */ + if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) + return -EINVAL; + + /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ + for (i = 0; i < sizeof(init->features) * 8; i++) { + bool set = (init->features[i / 32] & (1 << (i % 32))); + + if (set && i >= KVM_VCPU_MAX_FEATURES) + return -ENOENT; + + /* + * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must + * use the same feature set. + */ + if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && + test_bit(i, vcpu->arch.features) != set) + return -EINVAL; + + if (set) + set_bit(i, vcpu->arch.features); + } + + vcpu->arch.target = phys_target; + + /* Now we know what it is, we can reset it. */ + return kvm_reset_vcpu(vcpu); +} + + static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) { @@ -659,10 +716,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, return ret; /* + * Ensure a rebooted VM will fault in RAM pages and detect if the + * guest MMU is turned off and flush the caches as needed. + */ + if (vcpu->arch.has_run_once) + stage2_unmap_vm(vcpu->kvm); + + vcpu_reset_hcr(vcpu); + + /* * Handle the "start in power-off" case by marking the VCPU as paused. */ - if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) + if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) vcpu->arch.pause = true; + else + vcpu->arch.pause = false; return 0; } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index cc0b78769bd8..384bab67c462 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - vcpu->arch.hcr = HCR_GUEST_MASK; return 0; } @@ -274,31 +273,6 @@ int __attribute_const__ kvm_target_cpu(void) } } -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init) -{ - unsigned int i; - - /* We can only cope with guest==host and only on A15/A7 (for now). */ - if (init->target != kvm_target_cpu()) - return -EINVAL; - - vcpu->arch.target = init->target; - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); - - /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ - for (i = 0; i < sizeof(init->features) * 8; i++) { - if (test_bit(i, (void *)init->features)) { - if (i >= KVM_VCPU_MAX_FEATURES) - return -ENOENT; - set_bit(i, vcpu->arch.features); - } - } - - /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} - int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) { int target = kvm_target_cpu(); diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 4cb5a93182e9..5d3bfc0eb3f0 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -187,15 +187,18 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, } rt = vcpu->arch.mmio_decode.rt; - data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len); - trace_kvm_mmio((mmio.is_write) ? 
KVM_TRACE_MMIO_WRITE : - KVM_TRACE_MMIO_READ_UNSATISFIED, - mmio.len, fault_ipa, - (mmio.is_write) ? data : 0); + if (mmio.is_write) { + data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), + mmio.len); - if (mmio.is_write) + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len, + fault_ipa, data); mmio_write_buf(mmio.data, mmio.len, data); + } else { + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len, + fault_ipa, 0); + } if (vgic_handle_mmio(vcpu, run, &mmio)) return 1; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 8664ff17cbbe..1dc9778a00af 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -612,6 +612,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) unmap_range(kvm, kvm->arch.pgd, start, size); } +static void stage2_unmap_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + hva_t hva = memslot->userspace_addr; + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t size = PAGE_SIZE * memslot->npages; + hva_t reg_end = hva + size; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we should + * unmap any of them. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (!(vma->vm_flags & VM_PFNMAP)) { + gpa_t gpa = addr + (vm_start - memslot->userspace_addr); + unmap_stage2_range(kvm, gpa, vm_end - vm_start); + } + hva = vm_end; + } while (hva < reg_end); +} + +/** + * stage2_unmap_vm - Unmap Stage-2 RAM mappings + * @kvm: The struct kvm pointer + * + * Go through the memregions and unmap any reguler RAM + * backing memory already mapped to the VM. + */ +void stage2_unmap_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + stage2_unmap_memslot(kvm, memslot); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + /** * kvm_free_stage2_pgd - free all stage-2 tables * @kvm: The KVM struct pointer for the VM. 
@@ -853,6 +918,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct vm_area_struct *vma; pfn_t pfn; pgprot_t mem_type = PAGE_S2; + bool fault_ipa_uncached; write_fault = kvm_is_write_fault(vcpu); if (fault_status == FSC_PERM && !write_fault) { @@ -919,6 +985,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (!hugetlb && !force_pte) hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); + fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT; + if (hugetlb) { pmd_t new_pmd = pfn_pmd(pfn, mem_type); new_pmd = pmd_mkhuge(new_pmd); @@ -926,7 +994,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pmd_writable(&new_pmd); kvm_set_pfn_dirty(pfn); } - coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); + coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, + fault_ipa_uncached); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); } else { pte_t new_pte = pfn_pte(pfn, mem_type); @@ -934,7 +1003,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pte_writable(&new_pte); kvm_set_pfn_dirty(pfn); } - coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); + coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, + fault_ipa_uncached); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); } @@ -1294,11 +1364,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, hva = vm_end; } while (hva < reg_end); - if (ret) { - spin_lock(&kvm->mmu_lock); + spin_lock(&kvm->mmu_lock); + if (ret) unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); - spin_unlock(&kvm->mmu_lock); - } + else + stage2_flush_memslot(kvm, memslot); + spin_unlock(&kvm->mmu_lock); return ret; } @@ -1310,6 +1381,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { + /* + * Readonly memslots are not incoherent with the caches by definition, + * but in practice, they are used mostly to emulate ROMs or NOR flashes + * that the guest may consider devices and hence map as uncached. + * To prevent incoherency issues in these cases, tag all readonly + * regions as incoherent. + */ + if (slot->flags & KVM_MEM_READONLY) + slot->flags |= KVM_MEMSLOT_INCOHERENT; return 0; } diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 09cf37737ee2..58cb3248d277 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c @@ -15,6 +15,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/preempt.h> #include <linux/kvm_host.h> #include <linux/wait.h> @@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) { + int i; + struct kvm_vcpu *tmp; + + /* + * The KVM ABI specifies that a system event exit may call KVM_RUN + * again and may perform shutdown/reboot at a later time that when the + * actual request is made. Since we are implementing PSCI and a + * caller of PSCI reboot and shutdown expects that the system shuts + * down or reboots immediately, let's make sure that VCPUs are not run + * after this call is handled and before the VCPUs have been + * re-initialized. 
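The comment above explains why no VCPU may run between the guest's PSCI SYSTEM_OFF/SYSTEM_RESET call and the re-initialization that userspace performs afterwards; the loop that follows pauses and kicks every VCPU before the system-event exit is delivered. On the other side of the ABI this shows up as a KVM_EXIT_SYSTEM_EVENT exit. A minimal sketch of how a VMM might consume it (not part of this patch; constants and fields are those from <linux/kvm.h>, error handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns what the VMM should do next: 0 = power off, 1 = reset, 2 = other. */
static int handle_vcpu_exit(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
		switch (run->system_event.type) {
		case KVM_SYSTEM_EVENT_SHUTDOWN:
			return 0;	/* tear the VM down */
		case KVM_SYSTEM_EVENT_RESET:
			return 1;	/* KVM_ARM_VCPU_INIT each VCPU, then resume */
		}
	}
	return 2;			/* MMIO and other exit reasons */
}
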
+ */ + kvm_for_each_vcpu(i, tmp, vcpu->kvm) { + tmp->arch.pause = true; + kvm_vcpu_kick(tmp); + } + memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); vcpu->run->system_event.type = type; vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c index 6b98413cebd6..641edc313938 100644 --- a/arch/arm/mach-davinci/pm_domain.c +++ b/arch/arm/mach-davinci/pm_domain.c @@ -14,7 +14,7 @@ #include <linux/pm_clock.h> #include <linux/platform_device.h> -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int davinci_pm_runtime_suspend(struct device *dev) { int ret; diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e4a00bafffc1..603820e5aba7 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -21,7 +21,7 @@ menuconfig ARCH_EXYNOS select HAVE_S3C_RTC if RTC_CLASS select PINCTRL select PINCTRL_EXYNOS - select PM_GENERIC_DOMAINS if PM_RUNTIME + select PM_GENERIC_DOMAINS if PM select S5P_DEV_MFC select SRAM select MFD_SYSCON diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index ca79ddac38bc..ef6041e7e675 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -19,7 +19,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int keystone_pm_runtime_suspend(struct device *dev) { int ret; diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig index ebdba87b9671..fdbfadf00c84 100644 --- a/arch/arm/mach-mmp/Kconfig +++ b/arch/arm/mach-mmp/Kconfig @@ -86,11 +86,12 @@ config MACH_GPLUGD config MACH_MMP_DT bool "Support MMP (ARMv5) platforms from device tree" - select CPU_PXA168 - select CPU_PXA910 select USE_OF select PINCTRL select PINCTRL_SINGLE + select COMMON_CLK + select ARCH_HAS_RESET_CONTROLLER + select CPU_MOHAWK help Include support for Marvell MMP2 based platforms using the device tree. Needn't select any other machine while @@ -99,10 +100,12 @@ config MACH_MMP_DT config MACH_MMP2_DT bool "Support MMP2 (ARMv7) platforms from device tree" depends on !CPU_MOHAWK - select CPU_MMP2 select USE_OF select PINCTRL select PINCTRL_SINGLE + select COMMON_CLK + select ARCH_HAS_RESET_CONTROLLER + select CPU_PJ4 help Include support for Marvell MMP2 based platforms using the device tree. @@ -111,21 +114,18 @@ endmenu config CPU_PXA168 bool - select COMMON_CLK select CPU_MOHAWK help Select code specific to PXA168 config CPU_PXA910 bool - select COMMON_CLK select CPU_MOHAWK help Select code specific to PXA910 config CPU_MMP2 bool - select COMMON_CLK select CPU_PJ4 help Select code specific to MMP2. MMP2 is ARMv7 compatible. 
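The recurring CONFIG_PM_RUNTIME to CONFIG_PM substitutions in this series (the defconfig updates, the davinci and keystone pm_domain guards, and the exynos and omap Kconfig selects above) follow from CONFIG_PM_RUNTIME being folded into CONFIG_PM, so code that only matters for runtime PM is now built whenever power management is enabled at all. A rough sketch of the resulting driver-side idiom, with hypothetical "foo" names; SET_RUNTIME_PM_OPS() from linux/pm.h compiles away when CONFIG_PM is not set, so the callbacks can stay under the same guard:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, cut power, save context ... */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore power and clocks */
	return 0;
}
#endif /* CONFIG_PM, formerly CONFIG_PM_RUNTIME */

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
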
diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c index cca529ceecb7..b2296c9309b8 100644 --- a/arch/arm/mach-mmp/mmp-dt.c +++ b/arch/arm/mach-mmp/mmp-dt.c @@ -11,63 +11,42 @@ #include <linux/irqchip.h> #include <linux/of_platform.h> +#include <linux/clk-provider.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> +#include <asm/hardware/cache-tauros2.h> #include "common.h" extern void __init mmp_dt_init_timer(void); -static const struct of_dev_auxdata pxa168_auxdata_lookup[] __initconst = { - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4026000, "pxa2xx-uart.2", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL), - OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL), - OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL), - {} +static const char *pxa168_dt_board_compat[] __initdata = { + "mrvl,pxa168-aspenite", + NULL, }; -static const struct of_dev_auxdata pxa910_auxdata_lookup[] __initconst = { - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4036000, "pxa2xx-uart.2", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4037000, "pxa2xx-i2c.1", NULL), - OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL), - OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL), - {} +static const char *pxa910_dt_board_compat[] __initdata = { + "mrvl,pxa910-dkb", + NULL, }; -static void __init pxa168_dt_init(void) -{ - of_platform_populate(NULL, of_default_bus_match_table, - pxa168_auxdata_lookup, NULL); -} - -static void __init pxa910_dt_init(void) +static void __init mmp_init_time(void) { - of_platform_populate(NULL, of_default_bus_match_table, - pxa910_auxdata_lookup, NULL); +#ifdef CONFIG_CACHE_TAUROS2 + tauros2_init(0); +#endif + mmp_dt_init_timer(); + of_clk_init(NULL); } -static const char *mmp_dt_board_compat[] __initdata = { - "mrvl,pxa168-aspenite", - "mrvl,pxa910-dkb", - NULL, -}; - DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)") .map_io = mmp_map_io, - .init_time = mmp_dt_init_timer, - .init_machine = pxa168_dt_init, - .dt_compat = mmp_dt_board_compat, + .init_time = mmp_init_time, + .dt_compat = pxa168_dt_board_compat, MACHINE_END DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)") .map_io = mmp_map_io, - .init_time = mmp_dt_init_timer, - .init_machine = pxa910_dt_init, - .dt_compat = mmp_dt_board_compat, + .init_time = mmp_init_time, + .dt_compat = pxa910_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c index 023cb453f157..998c0f533abc 100644 --- a/arch/arm/mach-mmp/mmp2-dt.c +++ b/arch/arm/mach-mmp/mmp2-dt.c @@ -12,29 +12,22 @@ #include <linux/io.h> #include <linux/irqchip.h> #include <linux/of_platform.h> +#include <linux/clk-provider.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> +#include <asm/hardware/cache-tauros2.h> #include "common.h" extern void __init mmp_dt_init_timer(void); -static const struct of_dev_auxdata mmp2_auxdata_lookup[] __initconst = { - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4030000, "pxa2xx-uart.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.1", NULL), - 
OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.2", NULL), - OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4016000, "pxa2xx-uart.3", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL), - OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL), - OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp2-gpio", NULL), - OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL), - {} -}; - -static void __init mmp2_dt_init(void) +static void __init mmp_init_time(void) { - of_platform_populate(NULL, of_default_bus_match_table, - mmp2_auxdata_lookup, NULL); +#ifdef CONFIG_CACHE_TAUROS2 + tauros2_init(0); +#endif + mmp_dt_init_timer(); + of_clk_init(NULL); } static const char *mmp2_dt_board_compat[] __initdata = { @@ -44,7 +37,6 @@ static const char *mmp2_dt_board_compat[] __initdata = { DT_MACHINE_START(MMP2_DT, "Marvell MMP2 (Device Tree Support)") .map_io = mmp_map_io, - .init_time = mmp_dt_init_timer, - .init_machine = mmp2_dt_init, + .init_time = mmp_init_time, .dt_compat = mmp2_dt_board_compat, MACHINE_END diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 3f2d39672393..c40e209de65c 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -21,7 +21,7 @@ #include "soc.h" -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int omap1_pm_runtime_suspend(struct device *dev) { int ret; @@ -59,7 +59,7 @@ static struct dev_pm_domain default_pm_domain = { #define OMAP1_PM_DOMAIN (&default_pm_domain) #else #define OMAP1_PM_DOMAIN NULL -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ static struct pm_clk_notifier_block platform_bus_notifier = { .pm_domain = OMAP1_PM_DOMAIN, diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index f0edec199cd4..6ab656cc4f16 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -15,7 +15,7 @@ config ARCH_OMAP3 select ARM_CPU_SUSPEND if PM select OMAP_INTERCONNECT select PM_OPP if PM - select PM_RUNTIME if CPU_IDLE + select PM if CPU_IDLE select SOC_HAS_OMAP2_SDRC config ARCH_OMAP4 @@ -32,7 +32,7 @@ config ARCH_OMAP4 select PL310_ERRATA_588369 if CACHE_L2X0 select PL310_ERRATA_727915 if CACHE_L2X0 select PM_OPP if PM - select PM_RUNTIME if CPU_IDLE + select PM if CPU_IDLE select ARM_ERRATA_754322 select ARM_ERRATA_775420 @@ -103,7 +103,7 @@ config ARCH_OMAP2PLUS_TYPICAL select I2C_OMAP select MENELAUS if ARCH_OMAP2 select NEON if CPU_V7 - select PM_RUNTIME + select PM select REGULATOR select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 5c5ebb4db5f7..644ff3231bb8 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -111,6 +111,7 @@ static struct clk dpll3_ck; static const char *dpll3_ck_parent_names[] = { "sys_ck", + "sys_ck", }; static const struct clk_ops dpll3_ck_ops = { @@ -733,6 +734,10 @@ static const char *corex2_fck_parent_names[] = { DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL); DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops); +static const char *cpefuse_fck_parent_names[] = { + "sys_ck", +}; + static struct clk cpefuse_fck; static struct clk_hw_omap cpefuse_fck_hw = { @@ -744,7 +749,7 @@ static struct clk_hw_omap cpefuse_fck_hw = { .clkdm_name = "core_l4_clkdm", }; -DEFINE_STRUCT_CLK(cpefuse_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk 
csi2_96m_fck; @@ -775,7 +780,7 @@ static struct clk_hw_omap d2d_26m_fck_hw = { .clkdm_name = "d2d_clkdm", }; -DEFINE_STRUCT_CLK(d2d_26m_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk des1_ick; @@ -1046,7 +1051,7 @@ static struct clk_hw_omap dss2_alwon_fck_hw = { .clkdm_name = "dss_clkdm", }; -DEFINE_STRUCT_CLK(dss2_alwon_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk dss_96m_fck; @@ -1368,7 +1373,7 @@ DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops); static struct clk wkup_l4_ick; DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wkup_l4_ick, dpll3_ck_parent_names, core_l4_ick_ops); +DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops); static struct clk gpio1_ick; @@ -1862,7 +1867,7 @@ static struct clk_hw_omap hecc_ck_hw = { .clkdm_name = "core_l3_clkdm", }; -DEFINE_STRUCT_CLK(hecc_ck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk hsotgusb_fck_am35xx; @@ -1875,7 +1880,7 @@ static struct clk_hw_omap hsotgusb_fck_am35xx_hw = { .clkdm_name = "core_l3_clkdm", }; -DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk hsotgusb_ick_3430es1; @@ -2411,7 +2416,7 @@ static struct clk_hw_omap modem_fck_hw = { .clkdm_name = "d2d_clkdm", }; -DEFINE_STRUCT_CLK(modem_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk mspro_fck; @@ -2710,7 +2715,7 @@ static struct clk_hw_omap sr1_fck_hw = { .clkdm_name = "wkup_clkdm", }; -DEFINE_STRUCT_CLK(sr1_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk sr2_fck; @@ -2724,7 +2729,7 @@ static struct clk_hw_omap sr2_fck_hw = { .clkdm_name = "wkup_clkdm", }; -DEFINE_STRUCT_CLK(sr2_fck, dpll3_ck_parent_names, aes2_ick_ops); +DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops); static struct clk sr_l4_ick; diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index 641337c6cde9..a4282e79143e 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -270,8 +270,6 @@ extern const struct clksel_rate div31_1to31_rates[]; extern void __iomem *clk_memmaps[]; -extern int am33xx_clk_init(void); - extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 20e120d071dd..c2da2a0fe5ad 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -474,7 +474,7 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw) */ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, - struct clk **best_parent_clk) + struct clk_hw **best_parent_clk) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct dpll_data *dd; @@ -488,10 +488,10 @@ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = dd->clk_bypass; + *best_parent_clk = __clk_get_hw(dd->clk_bypass); } else { rate = omap2_dpll_round_rate(hw, rate, 
best_parent_rate); - *best_parent_clk = dd->clk_ref; + *best_parent_clk = __clk_get_hw(dd->clk_ref); } *best_parent_rate = rate; diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c index 535822fcf4bb..0e58e5a85d53 100644 --- a/arch/arm/mach-omap2/dpll44xx.c +++ b/arch/arm/mach-omap2/dpll44xx.c @@ -223,7 +223,7 @@ out: */ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, - struct clk **best_parent_clk) + struct clk_hw **best_parent_clk) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct dpll_data *dd; @@ -237,11 +237,11 @@ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = dd->clk_bypass; + *best_parent_clk = __clk_get_hw(dd->clk_bypass); } else { rate = omap4_dpll_regm4xen_round_rate(hw, rate, best_parent_rate); - *best_parent_clk = dd->clk_ref; + *best_parent_clk = __clk_get_hw(dd->clk_ref); } *best_parent_rate = rate; diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 53841dea80ea..c25feba05818 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -471,11 +471,15 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.0"; break; case 1: - /* FALLTHROUGH */ - default: omap_revision = AM437X_REV_ES1_1; cpu_rev = "1.1"; break; + case 2: + /* FALLTHROUGH */ + default: + omap_revision = AM437X_REV_ES1_2; + cpu_rev = "1.2"; + break; } break; case 0xb8f2: diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 4fc838354e31..a1bd6affb508 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -361,7 +361,7 @@ static void __init omap_hwmod_init_postsetup(void) u8 postsetup_state; /* Set the default postsetup state for all hwmods */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM postsetup_state = _HWMOD_STATE_IDLE; #else postsetup_state = _HWMOD_STATE_ENABLED; diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 8c58b71c2727..be9541e18650 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -588,7 +588,7 @@ odbs_exit: return ERR_PTR(ret); } -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index 4376f59626d1..c1a3b4416311 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -446,6 +446,7 @@ IS_OMAP_TYPE(3430, 0x3430) #define AM437X_CLASS 0x43700000 #define AM437X_REV_ES1_0 (AM437X_CLASS | (0x10 << 8)) #define AM437X_REV_ES1_1 (AM437X_CLASS | (0x11 << 8)) +#define AM437X_REV_ES1_2 (AM437X_CLASS | (0x12 << 8)) #define OMAP443X_CLASS 0x44300044 #define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8)) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e8907117861e..7864797609b3 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); * arm_iommu_create_mapping) * * Attaches specified io address space mapping to the provided device, - * this replaces the dma operations (dma_map_ops pointer) with the - * IOMMU aware version. More than one client might be attached to - * the same io address space mapping. + * More than one client might be attached to the same io address space + * mapping. 
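With this rework, arm_iommu_attach_device() and arm_iommu_detach_device() no longer install or clear the device's dma_map_ops themselves; choosing between the IOMMU-aware and the plain (coherent or non-coherent) operations moves into the new arch_setup_dma_ops() hook defined further down in this file, which bus code such as the OF/platform path is expected to call with information taken from the device tree. A hypothetical caller, only to illustrate the direction of the call (everything except the two arch hooks is made up):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

/* Hypothetical bus-side caller; base and size would normally come from the
 * DT "dma-ranges" property, and "iommu" from the IOMMU/OF glue (NULL when
 * the device sits behind no IOMMU). */
static void example_dma_configure(struct device *dev, bool coherent,
				  struct iommu_ops *iommu)
{
	arch_setup_dma_ops(dev, 0, 1ULL << 32, iommu, coherent);
}

/* Counterpart on device removal. */
static void example_dma_deconfigure(struct device *dev)
{
	arch_teardown_dma_ops(dev);
}
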
*/ int arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) @@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev, kref_get(&mapping->kref); dev->archdata.mapping = mapping; - set_dma_ops(dev, &iommu_ops); pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); return 0; @@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device); * @dev: valid struct device pointer * * Detaches the provided device from a previously attached map. - * This voids the dma operations (dma_map_ops pointer) */ void arm_iommu_detach_device(struct device *dev) { @@ -1989,10 +1986,83 @@ void arm_iommu_detach_device(struct device *dev) iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); dev->archdata.mapping = NULL; - set_dma_ops(dev, NULL); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); -#endif +static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) +{ + return coherent ? &iommu_coherent_ops : &iommu_ops; +} + +static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, + struct iommu_ops *iommu) +{ + struct dma_iommu_mapping *mapping; + + if (!iommu) + return false; + + mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); + if (IS_ERR(mapping)) { + pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", + size, dev_name(dev)); + return false; + } + + if (arm_iommu_attach_device(dev, mapping)) { + pr_warn("Failed to attached device %s to IOMMU_mapping\n", + dev_name(dev)); + arm_iommu_release_mapping(mapping); + return false; + } + + return true; +} + +static void arm_teardown_iommu_dma_ops(struct device *dev) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + + arm_iommu_detach_device(dev); + arm_iommu_release_mapping(mapping); +} + +#else + +static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, + struct iommu_ops *iommu) +{ + return false; +} + +static void arm_teardown_iommu_dma_ops(struct device *dev) { } + +#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops + +#endif /* CONFIG_ARM_DMA_USE_IOMMU */ + +static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) +{ + return coherent ? 
&arm_coherent_dma_ops : &arm_dma_ops; +} + +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + struct iommu_ops *iommu, bool coherent) +{ + struct dma_map_ops *dma_ops; + + dev->archdata.dma_coherent = coherent; + if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) + dma_ops = arm_get_iommu_dma_map_ops(coherent); + else + dma_ops = arm_get_dma_map_ops(coherent); + + set_dma_ops(dev, dma_ops); +} + +void arch_teardown_dma_ops(struct device *dev) +{ + arm_teardown_iommu_dma_ops(dev); +} diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 688db03ef5b8..b1f9a20a3677 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -14,7 +14,9 @@ config ARM64 select ARM_ARCH_TIMER select ARM_GIC select AUDIT_ARCH_COMPAT_GENERIC + select ARM_GIC_V2M if PCI_MSI select ARM_GIC_V3 + select ARM_GIC_V3_ITS if PCI_MSI select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 6b61091c7f4c..55103e50c51b 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -27,6 +27,7 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mman.h generic-y += msgbuf.h +generic-y += msi.h generic-y += mutex.h generic-y += pci.h generic-y += pci-bridge.h diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 5674a55b5518..8127e45e2637 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; +} + static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2012c4ba8d67..0b7dfdb931df 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -165,8 +165,6 @@ struct kvm_vcpu_stat { u32 halt_wakeup; }; -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init); int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); @@ -200,6 +198,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); u64 kvm_call_hyp(void *hypfn, ...); +void force_vm_exit(const cpumask_t *mask); int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 0caf7a59f6a1..14a74f136272 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_boot_hyp_pgd(void); void free_hyp_pgds(void); +void stage2_unmap_vm(struct kvm *kvm); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, @@ -243,9 +244,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) } static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, - unsigned long size) + unsigned long size, + bool ipa_uncached) { - if 
(!vcpu_has_cache_enabled(vcpu)) + if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) kvm_flush_dcache_to_poc((void *)hva, size); if (!icache_is_aliasing()) { /* PIPT */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index c45b7b1b7197..cee128732435 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock) static inline int arch_spin_is_locked(arch_spinlock_t *lock) { - return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); + return !arch_spin_value_unlocked(READ_ONCE(*lock)); } static inline int arch_spin_is_contended(arch_spinlock_t *lock) { - arch_spinlock_t lockval = ACCESS_ONCE(*lock); + arch_spinlock_t lockval = READ_ONCE(*lock); return (lockval.next - lockval.owner) > 1; } #define arch_spin_is_contended arch_spin_is_contended diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f311c49e..f1dbca7d5c96 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -540,6 +540,8 @@ const struct cpu_operations cpu_psci_ops = { .name = "psci", #ifdef CONFIG_CPU_IDLE .cpu_init_idle = cpu_psci_cpu_init_idle, +#endif +#ifdef CONFIG_ARM64_CPU_SUSPEND .cpu_suspend = cpu_psci_cpu_suspend, #endif #ifdef CONFIG_SMP diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 76794692c20b..9535bd555d1d 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; return 0; } @@ -297,31 +296,6 @@ int __attribute_const__ kvm_target_cpu(void) return -EINVAL; } -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init) -{ - unsigned int i; - int phys_target = kvm_target_cpu(); - - if (init->target != phys_target) - return -EINVAL; - - vcpu->arch.target = phys_target; - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); - - /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ - for (i = 0; i < sizeof(init->features) * 8; i++) { - if (init->features[i / 32] & (1 << (i % 32))) { - if (i >= KVM_VCPU_MAX_FEATURES) - return -ENOENT; - set_bit(i, vcpu->arch.features); - } - } - - /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} - int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) { int target = kvm_target_cpu(); diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index bf69601be546..cf33f33333cc 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -182,9 +182,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, static const char units[] = "KMGTPE"; u64 prot = val & pg_level[level].mask; - if (addr < LOWEST_ADDR) - return; - if (!st->level) { st->level = level; st->current_prot = prot; @@ -272,7 +269,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgd = pgd_offset(mm, 0); + pgd_t *pgd = pgd_offset(mm, 0UL); unsigned i; unsigned long addr; diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c index b0a608da7bd1..b964c667aced 100644 --- a/arch/cris/arch-v10/lib/usercopy.c +++ b/arch/cris/arch-v10/lib/usercopy.c @@ -30,8 +30,7 @@ /* Copy to userspace. This is based on the memcpy used for kernel-to-kernel copying; see "string.c". 
*/ -unsigned long -__copy_user (void __user *pdst, const void *psrc, unsigned long pn) +unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. @@ -187,13 +186,14 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) return retn; } +EXPORT_SYMBOL(__copy_user); /* Copy from user to kernel, zeroing the bytes that were inaccessible in userland. The return-value is the number of bytes that were inaccessible. */ -unsigned long -__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) +unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. @@ -369,11 +369,10 @@ copy_exception_bytes: return retn + n; } +EXPORT_SYMBOL(__copy_user_zeroing); /* Zero userspace. */ - -unsigned long -__do_clear_user (void __user *pto, unsigned long pn) +unsigned long __do_clear_user(void __user *pto, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. @@ -521,3 +520,4 @@ __do_clear_user (void __user *pto, unsigned long pn) return retn; } +EXPORT_SYMBOL(__do_clear_user); diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index 15a9ed1d579c..4fc16b44fff2 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig @@ -108,6 +108,7 @@ config ETRAX_AXISFLASHMAP select MTD_JEDECPROBE select MTD_BLOCK select MTD_COMPLEX_MAPPINGS + select MTD_MTDRAM help This option enables MTD mapping of flash devices. Needed to use flash memories. If unsure, say Y. @@ -358,13 +359,6 @@ config ETRAX_SPI_MMC default MMC select SPI select MMC_SPI - select ETRAX_SPI_MMC_BOARD - -# For the parts that can't be a module (due to restrictions in -# framework elsewhere). -config ETRAX_SPI_MMC_BOARD - boolean - default n # While the board info is MMC_SPI only, the drivers are written to be # independent of MMC_SPI, so we'll keep SPI non-dependent on the diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile index 39aa3c117a86..15fbfefced2c 100644 --- a/arch/cris/arch-v32/drivers/Makefile +++ b/arch/cris/arch-v32/drivers/Makefile @@ -10,4 +10,3 @@ obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o obj-$(CONFIG_ETRAX_I2C) += i2c.o obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o obj-$(CONFIG_PCI) += pci/ -obj-$(CONFIG_ETRAX_SPI_MMC_BOARD) += board_mmcspi.o diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h index c073cf4ba016..d9cc856f89fb 100644 --- a/arch/cris/arch-v32/drivers/i2c.h +++ b/arch/cris/arch-v32/drivers/i2c.h @@ -2,7 +2,6 @@ #include <linux/init.h> /* High level I2C actions */ -int __init i2c_init(void); int i2c_write(unsigned char theSlave, void *data, size_t nbytes); int i2c_read(unsigned char theSlave, void *data, size_t nbytes); int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue); diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index 5a149134cfb5..08a313fc2241 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -1,8 +1,7 @@ /* - * Simple synchronous serial port driver for ETRAX FS and Artpec-3. 
- * - * Copyright (c) 2005 Axis Communications AB + * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3. * + * Copyright (c) 2005, 2008 Axis Communications AB * Author: Mikael Starvik * */ @@ -16,16 +15,17 @@ #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/poll.h> -#include <linux/init.h> -#include <linux/timer.h> -#include <linux/spinlock.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> #include <linux/wait.h> #include <asm/io.h> -#include <dma.h> +#include <mach/dma.h> #include <pinmux.h> #include <hwregs/reg_rdwr.h> #include <hwregs/sser_defs.h> +#include <hwregs/timer_defs.h> #include <hwregs/dma_defs.h> #include <hwregs/dma.h> #include <hwregs/intr_vect_defs.h> @@ -59,22 +59,23 @@ /* the rest of the data pointed out by Descr1 and set readp to the start */ /* of Descr2 */ -#define SYNC_SERIAL_MAJOR 125 - /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ /* words can be handled */ -#define IN_BUFFER_SIZE 12288 -#define IN_DESCR_SIZE 256 -#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) +#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE +#define NBR_IN_DESCR (8*6) +#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR) -#define OUT_BUFFER_SIZE 1024*8 #define NBR_OUT_DESCR 8 +#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR) #define DEFAULT_FRAME_RATE 0 #define DEFAULT_WORD_RATE 7 +/* To be removed when we move to pure udev. */ +#define SYNC_SERIAL_MAJOR 125 + /* NOTE: Enabling some debug will likely cause overrun or underrun, - * especially if manual mode is use. + * especially if manual mode is used. */ #define DEBUG(x) #define DEBUGREAD(x) @@ -85,11 +86,28 @@ #define DEBUGTRDMA(x) #define DEBUGOUTBUF(x) -typedef struct sync_port -{ - reg_scope_instances regi_sser; - reg_scope_instances regi_dmain; - reg_scope_instances regi_dmaout; +enum syncser_irq_setup { + no_irq_setup = 0, + dma_irq_setup = 1, + manual_irq_setup = 2, +}; + +struct sync_port { + unsigned long regi_sser; + unsigned long regi_dmain; + unsigned long regi_dmaout; + + /* Interrupt vectors. */ + unsigned long dma_in_intr_vect; /* Used for DMA in. */ + unsigned long dma_out_intr_vect; /* Used for DMA out. */ + unsigned long syncser_intr_vect; /* Used when no DMA. */ + + /* DMA number for in and out. */ + unsigned int dma_in_nbr; + unsigned int dma_out_nbr; + + /* DMA owner. */ + enum dma_owner req_dma; char started; /* 1 if port has been started */ char port_nbr; /* Port 0 or 1 */ @@ -99,22 +117,29 @@ typedef struct sync_port char use_dma; /* 1 if port uses dma */ char tr_running; - char init_irqs; + enum syncser_irq_setup init_irqs; int output; int input; /* Next byte to be read by application */ - volatile unsigned char *volatile readp; + unsigned char *readp; /* Next byte to be written by etrax */ - volatile unsigned char *volatile writep; + unsigned char *writep; unsigned int in_buffer_size; + unsigned int in_buffer_len; unsigned int inbufchunk; - unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); - unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); - unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); - struct dma_descr_data* next_rx_desc; - struct dma_descr_data* prev_rx_desc; + /* Data buffers for in and output. 
*/ + unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32); + unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32); + unsigned char flip[IN_BUFFER_SIZE] __aligned(32); + struct timespec timestamp[NBR_IN_DESCR]; + struct dma_descr_data *next_rx_desc; + struct dma_descr_data *prev_rx_desc; + + struct timeval last_timestamp; + int read_ts_idx; + int write_ts_idx; /* Pointer to the first available descriptor in the ring, * unless active_tr_descr == catch_tr_descr and a dma @@ -135,114 +160,138 @@ typedef struct sync_port /* Number of bytes currently locked for being read by DMA */ int out_buf_count; - dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); - dma_descr_context in_context __attribute__ ((__aligned__(32))); - dma_descr_data out_descr[NBR_OUT_DESCR] - __attribute__ ((__aligned__(16))); - dma_descr_context out_context __attribute__ ((__aligned__(32))); + dma_descr_context in_context __aligned(32); + dma_descr_context out_context __aligned(32); + dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16); + dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16); + wait_queue_head_t out_wait_q; wait_queue_head_t in_wait_q; spinlock_t lock; -} sync_port; +}; static DEFINE_MUTEX(sync_serial_mutex); static int etrax_sync_serial_init(void); static void initialize_port(int portnbr); static inline int sync_data_avail(struct sync_port *port); -static int sync_serial_open(struct inode *, struct file*); -static int sync_serial_release(struct inode*, struct file*); +static int sync_serial_open(struct inode *, struct file *); +static int sync_serial_release(struct inode *, struct file *); static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); -static int sync_serial_ioctl(struct file *, - unsigned int cmd, unsigned long arg); -static ssize_t sync_serial_write(struct file * file, const char * buf, +static long sync_serial_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static int sync_serial_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg); +static ssize_t sync_serial_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos); -static ssize_t sync_serial_read(struct file *file, char *buf, +static ssize_t sync_serial_read(struct file *file, char __user *buf, size_t count, loff_t *ppos); -#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ - defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ - (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ - defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) +#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ + defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ + (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ + defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))) #define SYNC_SER_DMA +#else +#define SYNC_SER_MANUAL #endif -static void send_word(sync_port* port); -static void start_dma_out(struct sync_port *port, const char *data, int count); -static void start_dma_in(sync_port* port); #ifdef SYNC_SER_DMA +static void start_dma_out(struct sync_port *port, const char *data, int count); +static void start_dma_in(struct sync_port *port); static irqreturn_t tr_interrupt(int irq, void *dev_id); static irqreturn_t rx_interrupt(int irq, void *dev_id); #endif - -#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ - !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ - (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ - !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) -#define SYNC_SER_MANUAL -#endif #ifdef SYNC_SER_MANUAL +static void send_word(struct 
sync_port *port); static irqreturn_t manual_interrupt(int irq, void *dev_id); #endif -#ifdef CONFIG_ETRAXFS /* ETRAX FS */ -#define OUT_DMA_NBR 4 -#define IN_DMA_NBR 5 -#define PINMUX_SSER pinmux_sser0 -#define SYNCSER_INST regi_sser0 -#define SYNCSER_INTR_VECT SSER0_INTR_VECT -#define OUT_DMA_INST regi_dma4 -#define IN_DMA_INST regi_dma5 -#define DMA_OUT_INTR_VECT DMA4_INTR_VECT -#define DMA_IN_INTR_VECT DMA5_INTR_VECT -#define REQ_DMA_SYNCSER dma_sser0 -#else /* Artpec-3 */ -#define OUT_DMA_NBR 6 -#define IN_DMA_NBR 7 -#define PINMUX_SSER pinmux_sser -#define SYNCSER_INST regi_sser -#define SYNCSER_INTR_VECT SSER_INTR_VECT -#define OUT_DMA_INST regi_dma6 -#define IN_DMA_INST regi_dma7 -#define DMA_OUT_INTR_VECT DMA6_INTR_VECT -#define DMA_IN_INTR_VECT DMA7_INTR_VECT -#define REQ_DMA_SYNCSER dma_sser +#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed +#define artpec_request_dma crisv32_request_dma +#define artpec_free_dma crisv32_free_dma + +#ifdef CONFIG_ETRAXFS +/* ETRAX FS */ +#define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR +#define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR +#define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR +#define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR +#define PINMUX_SSER0 pinmux_sser0 +#define PINMUX_SSER1 pinmux_sser1 +#define SYNCSER_INST0 regi_sser0 +#define SYNCSER_INST1 regi_sser1 +#define SYNCSER_INTR_VECT0 SSER0_INTR_VECT +#define SYNCSER_INTR_VECT1 SSER1_INTR_VECT +#define OUT_DMA_INST0 regi_dma4 +#define IN_DMA_INST0 regi_dma5 +#define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT +#define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT +#define DMA_IN_INTR_VECT0 DMA5_INTR_VECT +#define DMA_IN_INTR_VECT1 DMA6_INTR_VECT +#define REQ_DMA_SYNCSER0 dma_sser0 +#define REQ_DMA_SYNCSER1 dma_sser1 +#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) +#define PORT1_DMA 1 +#else +#define PORT1_DMA 0 +#endif +#elif defined(CONFIG_CRIS_MACH_ARTPEC3) +/* ARTPEC-3 */ +#define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR +#define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR +#define PINMUX_SSER0 pinmux_sser +#define SYNCSER_INST0 regi_sser +#define SYNCSER_INTR_VECT0 SSER_INTR_VECT +#define OUT_DMA_INST0 regi_dma6 +#define IN_DMA_INST0 regi_dma7 +#define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT +#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT +#define REQ_DMA_SYNCSER0 dma_sser +#define REQ_DMA_SYNCSER1 dma_sser #endif -/* The ports */ -static struct sync_port ports[]= -{ - { - .regi_sser = SYNCSER_INST, - .regi_dmaout = OUT_DMA_INST, - .regi_dmain = IN_DMA_INST, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) - .use_dma = 1, +#define PORT0_DMA 1 #else - .use_dma = 0, +#define PORT0_DMA 0 #endif - } -#ifdef CONFIG_ETRAXFS - , +/* The ports */ +static struct sync_port ports[] = { { - .regi_sser = regi_sser1, - .regi_dmaout = regi_dma6, - .regi_dmain = regi_dma7, -#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) - .use_dma = 1, -#else - .use_dma = 0, -#endif - } + .regi_sser = SYNCSER_INST0, + .regi_dmaout = OUT_DMA_INST0, + .regi_dmain = IN_DMA_INST0, + .use_dma = PORT0_DMA, + .dma_in_intr_vect = DMA_IN_INTR_VECT0, + .dma_out_intr_vect = DMA_OUT_INTR_VECT0, + .dma_in_nbr = DMA_IN_NBR0, + .dma_out_nbr = DMA_OUT_NBR0, + .req_dma = REQ_DMA_SYNCSER0, + .syncser_intr_vect = SYNCSER_INTR_VECT0, + }, +#ifdef CONFIG_ETRAXFS + { + .regi_sser = SYNCSER_INST1, + .regi_dmaout = regi_dma6, + .regi_dmain = regi_dma7, + .use_dma = PORT1_DMA, + .dma_in_intr_vect = DMA_IN_INTR_VECT1, + .dma_out_intr_vect = DMA_OUT_INTR_VECT1, + .dma_in_nbr = DMA_IN_NBR1, + .dma_out_nbr = DMA_OUT_NBR1, + .req_dma = REQ_DMA_SYNCSER1, + .syncser_intr_vect = SYNCSER_INTR_VECT1, + }, #endif 
}; #define NBR_PORTS ARRAY_SIZE(ports) -static const struct file_operations sync_serial_fops = { +static const struct file_operations syncser_fops = { .owner = THIS_MODULE, .write = sync_serial_write, .read = sync_serial_read, @@ -253,61 +302,40 @@ static const struct file_operations sync_serial_fops = { .llseek = noop_llseek, }; -static int __init etrax_sync_serial_init(void) -{ - ports[0].enabled = 0; -#ifdef CONFIG_ETRAXFS - ports[1].enabled = 0; -#endif - if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", - &sync_serial_fops) < 0) { - printk(KERN_WARNING - "Unable to get major for synchronous serial port\n"); - return -EBUSY; - } - - /* Initialize Ports */ -#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) - if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) { - printk(KERN_WARNING - "Unable to alloc pins for synchronous serial port 0\n"); - return -EIO; - } - ports[0].enabled = 1; - initialize_port(0); -#endif - -#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) - if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) { - printk(KERN_WARNING - "Unable to alloc pins for synchronous serial port 0\n"); - return -EIO; - } - ports[1].enabled = 1; - initialize_port(1); -#endif +static dev_t syncser_first; +static int minor_count = NBR_PORTS; +#define SYNCSER_NAME "syncser" +static struct cdev *syncser_cdev; +static struct class *syncser_class; -#ifdef CONFIG_ETRAXFS - printk(KERN_INFO "ETRAX FS synchronous serial port driver\n"); -#else - printk(KERN_INFO "Artpec-3 synchronous serial port driver\n"); -#endif - return 0; +static void sync_serial_start_port(struct sync_port *port) +{ + reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); + reg_sser_rw_tr_cfg tr_cfg = + REG_RD(sser, port->regi_sser, rw_tr_cfg); + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); + cfg.en = regk_sser_yes; + tr_cfg.tr_en = regk_sser_yes; + rec_cfg.rec_en = regk_sser_yes; + REG_WR(sser, port->regi_sser, rw_cfg, cfg); + REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); + REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); + port->started = 1; } static void __init initialize_port(int portnbr) { - int __attribute__((unused)) i; struct sync_port *port = &ports[portnbr]; - reg_sser_rw_cfg cfg = {0}; - reg_sser_rw_frm_cfg frm_cfg = {0}; - reg_sser_rw_tr_cfg tr_cfg = {0}; - reg_sser_rw_rec_cfg rec_cfg = {0}; + reg_sser_rw_cfg cfg = { 0 }; + reg_sser_rw_frm_cfg frm_cfg = { 0 }; + reg_sser_rw_tr_cfg tr_cfg = { 0 }; + reg_sser_rw_rec_cfg rec_cfg = { 0 }; - DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); + DEBUG(pr_info("Init sync serial port %d\n", portnbr)); port->port_nbr = portnbr; - port->init_irqs = 1; + port->init_irqs = no_irq_setup; port->out_rd_ptr = port->out_buffer; port->out_buf_count = 0; @@ -318,10 +346,11 @@ static void __init initialize_port(int portnbr) port->readp = port->flip; port->writep = port->flip; port->in_buffer_size = IN_BUFFER_SIZE; + port->in_buffer_len = 0; port->inbufchunk = IN_DESCR_SIZE; - port->next_rx_desc = &port->in_descr[0]; - port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1]; - port->prev_rx_desc->eol = 1; + + port->read_ts_idx = 0; + port->write_ts_idx = 0; init_waitqueue_head(&port->out_wait_q); init_waitqueue_head(&port->in_wait_q); @@ -368,14 +397,18 @@ static void __init initialize_port(int portnbr) REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); #ifdef SYNC_SER_DMA - /* Setup the descriptor ring for dma out/transmit. 
*/ - for (i = 0; i < NBR_OUT_DESCR; i++) { - port->out_descr[i].wait = 0; - port->out_descr[i].intr = 1; - port->out_descr[i].eol = 0; - port->out_descr[i].out_eop = 0; - port->out_descr[i].next = - (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); + { + int i; + /* Setup the descriptor ring for dma out/transmit. */ + for (i = 0; i < NBR_OUT_DESCR; i++) { + dma_descr_data *descr = &port->out_descr[i]; + descr->wait = 0; + descr->intr = 1; + descr->eol = 0; + descr->out_eop = 0; + descr->next = + (dma_descr_data *)virt_to_phys(&descr[i+1]); + } } /* Create a ring from the list. */ @@ -391,201 +424,116 @@ static void __init initialize_port(int portnbr) static inline int sync_data_avail(struct sync_port *port) { - int avail; - unsigned char *start; - unsigned char *end; - - start = (unsigned char*)port->readp; /* cast away volatile */ - end = (unsigned char*)port->writep; /* cast away volatile */ - /* 0123456789 0123456789 - * ----- - ----- - * ^rp ^wp ^wp ^rp - */ - - if (end >= start) - avail = end - start; - else - avail = port->in_buffer_size - (start - end); - return avail; -} - -static inline int sync_data_avail_to_end(struct sync_port *port) -{ - int avail; - unsigned char *start; - unsigned char *end; - - start = (unsigned char*)port->readp; /* cast away volatile */ - end = (unsigned char*)port->writep; /* cast away volatile */ - /* 0123456789 0123456789 - * ----- ----- - * ^rp ^wp ^wp ^rp - */ - - if (end >= start) - avail = end - start; - else - avail = port->flip + port->in_buffer_size - start; - return avail; + return port->in_buffer_len; } static int sync_serial_open(struct inode *inode, struct file *file) { + int ret = 0; int dev = iminor(inode); - int ret = -EBUSY; - sync_port *port; - reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; - reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; + struct sync_port *port; +#ifdef SYNC_SER_DMA + reg_dma_rw_cfg cfg = { .en = regk_dma_yes }; + reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes }; +#endif - mutex_lock(&sync_serial_mutex); - DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); + DEBUG(pr_debug("Open sync serial port %d\n", dev)); - if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) - { - DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); - ret = -ENODEV; - goto out; + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); + return -ENODEV; } port = &ports[dev]; /* Allow open this device twice (assuming one reader and one writer) */ - if (port->busy == 2) - { - DEBUG(printk(KERN_DEBUG "Device is busy.. 
\n")); - goto out; + if (port->busy == 2) { + DEBUG(pr_info("syncser%d is busy\n", dev)); + return -EBUSY; } + mutex_lock(&sync_serial_mutex); - if (port->init_irqs) { - if (port->use_dma) { - if (port == &ports[0]) { -#ifdef SYNC_SER_DMA - if (request_irq(DMA_OUT_INTR_VECT, - tr_interrupt, - 0, - "synchronous serial 0 dma tr", - &ports[0])) { - printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); - goto out; - } else if (request_irq(DMA_IN_INTR_VECT, - rx_interrupt, - 0, - "synchronous serial 1 dma rx", - &ports[0])) { - free_irq(DMA_OUT_INTR_VECT, &port[0]); - printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); - goto out; - } else if (crisv32_request_dma(OUT_DMA_NBR, - "synchronous serial 0 dma tr", - DMA_VERBOSE_ON_ERROR, - 0, - REQ_DMA_SYNCSER)) { - free_irq(DMA_OUT_INTR_VECT, &port[0]); - free_irq(DMA_IN_INTR_VECT, &port[0]); - printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); - goto out; - } else if (crisv32_request_dma(IN_DMA_NBR, - "synchronous serial 0 dma rec", - DMA_VERBOSE_ON_ERROR, - 0, - REQ_DMA_SYNCSER)) { - crisv32_free_dma(OUT_DMA_NBR); - free_irq(DMA_OUT_INTR_VECT, &port[0]); - free_irq(DMA_IN_INTR_VECT, &port[0]); - printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); - goto out; - } -#endif - } -#ifdef CONFIG_ETRAXFS - else if (port == &ports[1]) { + /* Clear any stale date left in the flip buffer */ + port->readp = port->writep = port->flip; + port->in_buffer_len = 0; + port->read_ts_idx = 0; + port->write_ts_idx = 0; + + if (port->init_irqs != no_irq_setup) { + /* Init only on first call. */ + port->busy++; + mutex_unlock(&sync_serial_mutex); + return 0; + } + if (port->use_dma) { #ifdef SYNC_SER_DMA - if (request_irq(DMA6_INTR_VECT, - tr_interrupt, - 0, - "synchronous serial 1 dma tr", - &ports[1])) { - printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ"); - goto out; - } else if (request_irq(DMA7_INTR_VECT, - rx_interrupt, - 0, - "synchronous serial 1 dma rx", - &ports[1])) { - free_irq(DMA6_INTR_VECT, &ports[1]); - printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); - goto out; - } else if (crisv32_request_dma( - SYNC_SER1_TX_DMA_NBR, - "synchronous serial 1 dma tr", - DMA_VERBOSE_ON_ERROR, - 0, - dma_sser1)) { - free_irq(DMA6_INTR_VECT, &ports[1]); - free_irq(DMA7_INTR_VECT, &ports[1]); - printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); - goto out; - } else if (crisv32_request_dma( - SYNC_SER1_RX_DMA_NBR, - "synchronous serial 3 dma rec", - DMA_VERBOSE_ON_ERROR, - 0, - dma_sser1)) { - crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); - free_irq(DMA6_INTR_VECT, &ports[1]); - free_irq(DMA7_INTR_VECT, &ports[1]); - printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel"); - goto out; - } -#endif - } + const char *tmp; + DEBUG(pr_info("Using DMA for syncser%d\n", dev)); + + tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx"; + if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0, + tmp, port)) { + pr_err("Can't alloc syncser%d TX IRQ", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + if (artpec_request_dma(port->dma_out_nbr, tmp, + DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { + free_irq(port->dma_out_intr_vect, port); + pr_err("Can't alloc syncser%d TX DMA", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + tmp = dev == 0 ? 
"syncser0 rx" : "syncser1 rx"; + if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0, + tmp, port)) { + artpec_free_dma(port->dma_out_nbr); + free_irq(port->dma_out_intr_vect, port); + pr_err("Can't alloc syncser%d RX IRQ", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + if (artpec_request_dma(port->dma_in_nbr, tmp, + DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { + artpec_free_dma(port->dma_out_nbr); + free_irq(port->dma_out_intr_vect, port); + free_irq(port->dma_in_intr_vect, port); + pr_err("Can't alloc syncser%d RX DMA", dev); + ret = -EBUSY; + goto unlock_and_exit; + } + /* Enable DMAs */ + REG_WR(dma, port->regi_dmain, rw_cfg, cfg); + REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); + /* Enable DMA IRQs */ + REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); + REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); + /* Set up wordsize = 1 for DMAs. */ + DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1); + DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1); + + start_dma_in(port); + port->init_irqs = dma_irq_setup; #endif - /* Enable DMAs */ - REG_WR(dma, port->regi_dmain, rw_cfg, cfg); - REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); - /* Enable DMA IRQs */ - REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); - REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); - /* Set up wordsize = 1 for DMAs. */ - DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1); - DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1); - - start_dma_in(port); - port->init_irqs = 0; - } else { /* !port->use_dma */ + } else { /* !port->use_dma */ #ifdef SYNC_SER_MANUAL - if (port == &ports[0]) { - if (request_irq(SYNCSER_INTR_VECT, - manual_interrupt, - 0, - "synchronous serial manual irq", - &ports[0])) { - printk("Can't allocate sync serial manual irq"); - goto out; - } - } -#ifdef CONFIG_ETRAXFS - else if (port == &ports[1]) { - if (request_irq(SSER1_INTR_VECT, - manual_interrupt, - 0, - "synchronous serial manual irq", - &ports[1])) { - printk(KERN_CRIT "Can't allocate sync serial manual irq"); - goto out; - } - } -#endif - port->init_irqs = 0; + const char *tmp = dev == 0 ? 
"syncser0 manual irq" : + "syncser1 manual irq"; + if (request_irq(port->syncser_intr_vect, manual_interrupt, + 0, tmp, port)) { + pr_err("Can't alloc syncser%d manual irq", + dev); + ret = -EBUSY; + goto unlock_and_exit; + } + port->init_irqs = manual_irq_setup; #else - panic("sync_serial: Manual mode not supported.\n"); + panic("sync_serial: Manual mode not supported\n"); #endif /* SYNC_SER_MANUAL */ - } - - } /* port->init_irqs */ - + } port->busy++; ret = 0; -out: + +unlock_and_exit: mutex_unlock(&sync_serial_mutex); return ret; } @@ -593,18 +541,17 @@ out: static int sync_serial_release(struct inode *inode, struct file *file) { int dev = iminor(inode); - sync_port *port; + struct sync_port *port; - if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) - { - DEBUG(printk("Invalid minor %d\n", dev)); + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; if (port->busy) port->busy--; if (!port->busy) - /* XXX */ ; + /* XXX */; return 0; } @@ -612,21 +559,15 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait) { int dev = iminor(file_inode(file)); unsigned int mask = 0; - sync_port *port; - DEBUGPOLL( static unsigned int prev_mask = 0; ); + struct sync_port *port; + DEBUGPOLL( + static unsigned int prev_mask; + ); port = &ports[dev]; - if (!port->started) { - reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); - reg_sser_rw_rec_cfg rec_cfg = - REG_RD(sser, port->regi_sser, rw_rec_cfg); - cfg.en = regk_sser_yes; - rec_cfg.rec_en = port->input; - REG_WR(sser, port->regi_sser, rw_cfg, cfg); - REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); - port->started = 1; - } + if (!port->started) + sync_serial_start_port(port); poll_wait(file, &port->out_wait_q, wait); poll_wait(file, &port->in_wait_q, wait); @@ -645,33 +586,175 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait) if (port->input && sync_data_avail(port) >= port->inbufchunk) mask |= POLLIN | POLLRDNORM; - DEBUGPOLL(if (mask != prev_mask) - printk("sync_serial_poll: mask 0x%08X %s %s\n", mask, - mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":""); - prev_mask = mask; - ); + DEBUGPOLL( + if (mask != prev_mask) + pr_info("sync_serial_poll: mask 0x%08X %s %s\n", + mask, + mask & POLLOUT ? "POLLOUT" : "", + mask & POLLIN ? 
"POLLIN" : ""); + prev_mask = mask; + ); return mask; } -static int sync_serial_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static ssize_t __sync_serial_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos, + struct timespec *ts) +{ + unsigned long flags; + int dev = MINOR(file->f_dentry->d_inode->i_rdev); + int avail; + struct sync_port *port; + unsigned char *start; + unsigned char *end; + + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); + return -ENODEV; + } + port = &ports[dev]; + + if (!port->started) + sync_serial_start_port(port); + + /* Calculate number of available bytes */ + /* Save pointers to avoid that they are modified by interrupt */ + spin_lock_irqsave(&port->lock, flags); + start = port->readp; + end = port->writep; + spin_unlock_irqrestore(&port->lock, flags); + + while ((start == end) && !port->in_buffer_len) { + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + wait_event_interruptible(port->in_wait_q, + !(start == end && !port->full)); + + if (signal_pending(current)) + return -EINTR; + + spin_lock_irqsave(&port->lock, flags); + start = port->readp; + end = port->writep; + spin_unlock_irqrestore(&port->lock, flags); + } + + DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n", + dev, count, + start - port->flip, end - port->flip, + port->in_buffer_size)); + + /* Lazy read, never return wrapped data. */ + if (end > start) + avail = end - start; + else + avail = port->flip + port->in_buffer_size - start; + + count = count > avail ? avail : count; + if (copy_to_user(buf, start, count)) + return -EFAULT; + + /* If timestamp requested, find timestamp of first returned byte + * and copy it. + * N.B: Applications that request timstamps MUST read data in + * chunks that are multiples of IN_DESCR_SIZE. + * Otherwise the timestamps will not be aligned to the data read. + */ + if (ts != NULL) { + int idx = port->read_ts_idx; + memcpy(ts, &port->timestamp[idx], sizeof(struct timespec)); + port->read_ts_idx += count / IN_DESCR_SIZE; + if (port->read_ts_idx >= NBR_IN_DESCR) + port->read_ts_idx = 0; + } + + spin_lock_irqsave(&port->lock, flags); + port->readp += count; + /* Check for wrap */ + if (port->readp >= port->flip + port->in_buffer_size) + port->readp = port->flip; + port->in_buffer_len -= count; + port->full = 0; + spin_unlock_irqrestore(&port->lock, flags); + + DEBUGREAD(pr_info("r %d\n", count)); + + return count; +} + +static ssize_t sync_serial_input(struct file *file, unsigned long arg) +{ + struct ssp_request req; + int count; + int ret; + + /* Copy the request structure from user-mode. */ + ret = copy_from_user(&req, (struct ssp_request __user *)arg, + sizeof(struct ssp_request)); + + if (ret) { + DEBUG(pr_info("sync_serial_input copy from user failed\n")); + return -EFAULT; + } + + /* To get the timestamps aligned, make sure that 'len' + * is a multiple of IN_DESCR_SIZE. + */ + if ((req.len % IN_DESCR_SIZE) != 0) { + DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n", + req.len, IN_DESCR_SIZE)); + return -EFAULT; + } + + /* Do the actual read. */ + /* Note that req.buf is actually a pointer to user space. */ + count = __sync_serial_read(file, req.buf, req.len, + NULL, &req.ts); + + if (count < 0) { + DEBUG(pr_info("sync_serial_input read failed\n")); + return count; + } + + /* Copy the request back to user-mode. 
*/ + ret = copy_to_user((struct ssp_request __user *)arg, &req, + sizeof(struct ssp_request)); + + if (ret) { + DEBUG(pr_info("syncser input copy2user failed\n")); + return -EFAULT; + } + + /* Return the number of bytes read. */ + return count; +} + + +static int sync_serial_ioctl_unlocked(struct file *file, + unsigned int cmd, unsigned long arg) { int return_val = 0; int dma_w_size = regk_dma_set_w_size1; int dev = iminor(file_inode(file)); - sync_port *port; + struct sync_port *port; reg_sser_rw_tr_cfg tr_cfg; reg_sser_rw_rec_cfg rec_cfg; reg_sser_rw_frm_cfg frm_cfg; reg_sser_rw_cfg gen_cfg; reg_sser_rw_intr_mask intr_mask; - if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) - { - DEBUG(printk("Invalid minor %d\n", dev)); + if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { + DEBUG(pr_info("Invalid minor %d\n", dev)); return -1; } - port = &ports[dev]; + + if (cmd == SSP_INPUT) + return sync_serial_input(file, arg); + + port = &ports[dev]; spin_lock_irq(&port->lock); tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); @@ -680,11 +763,9 @@ static int sync_serial_ioctl(struct file *file, gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); - switch(cmd) - { + switch (cmd) { case SSP_SPEED: - if (GET_SPEED(arg) == CODEC) - { + if (GET_SPEED(arg) == CODEC) { unsigned int freq; gen_cfg.base_freq = regk_sser_f32; @@ -701,15 +782,25 @@ static int sync_serial_ioctl(struct file *file, case FREQ_256kHz: gen_cfg.clk_div = 125 * (1 << (freq - FREQ_256kHz)) - 1; - break; + break; case FREQ_512kHz: gen_cfg.clk_div = 62; - break; + break; case FREQ_1MHz: case FREQ_2MHz: case FREQ_4MHz: gen_cfg.clk_div = 8 * (1 << freq) - 1; - break; + break; + } + } else if (GET_SPEED(arg) == CODEC_f32768) { + gen_cfg.base_freq = regk_sser_f32_768; + switch (GET_FREQ(arg)) { + case FREQ_4096kHz: + gen_cfg.clk_div = 7; + break; + default: + spin_unlock_irq(&port->lock); + return -EINVAL; } } else { gen_cfg.base_freq = regk_sser_f29_493; @@ -767,62 +858,64 @@ static int sync_serial_ioctl(struct file *file, break; case SSP_MODE: - switch(arg) - { - case MASTER_OUTPUT: - port->output = 1; - port->input = 0; - frm_cfg.out_on = regk_sser_tr; - frm_cfg.frame_pin_dir = regk_sser_out; - gen_cfg.clk_dir = regk_sser_out; - break; - case SLAVE_OUTPUT: - port->output = 1; - port->input = 0; - frm_cfg.frame_pin_dir = regk_sser_in; - gen_cfg.clk_dir = regk_sser_in; - break; - case MASTER_INPUT: - port->output = 0; - port->input = 1; - frm_cfg.frame_pin_dir = regk_sser_out; - frm_cfg.out_on = regk_sser_intern_tb; - gen_cfg.clk_dir = regk_sser_out; - break; - case SLAVE_INPUT: - port->output = 0; - port->input = 1; - frm_cfg.frame_pin_dir = regk_sser_in; - gen_cfg.clk_dir = regk_sser_in; - break; - case MASTER_BIDIR: - port->output = 1; - port->input = 1; - frm_cfg.frame_pin_dir = regk_sser_out; - frm_cfg.out_on = regk_sser_intern_tb; - gen_cfg.clk_dir = regk_sser_out; - break; - case SLAVE_BIDIR: - port->output = 1; - port->input = 1; - frm_cfg.frame_pin_dir = regk_sser_in; - gen_cfg.clk_dir = regk_sser_in; - break; - default: - spin_unlock_irq(&port->lock); - return -EINVAL; + switch (arg) { + case MASTER_OUTPUT: + port->output = 1; + port->input = 0; + frm_cfg.out_on = regk_sser_tr; + frm_cfg.frame_pin_dir = regk_sser_out; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_OUTPUT: + port->output = 1; + port->input = 0; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + case MASTER_INPUT: + port->output = 0; + port->input = 1; + 
frm_cfg.frame_pin_dir = regk_sser_out; + frm_cfg.out_on = regk_sser_intern_tb; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_INPUT: + port->output = 0; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + case MASTER_BIDIR: + port->output = 1; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_out; + frm_cfg.out_on = regk_sser_intern_tb; + gen_cfg.clk_dir = regk_sser_out; + break; + case SLAVE_BIDIR: + port->output = 1; + port->input = 1; + frm_cfg.frame_pin_dir = regk_sser_in; + gen_cfg.clk_dir = regk_sser_in; + break; + default: + spin_unlock_irq(&port->lock); + return -EINVAL; } - if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) + if (!port->use_dma || arg == MASTER_OUTPUT || + arg == SLAVE_OUTPUT) intr_mask.rdav = regk_sser_yes; break; case SSP_FRAME_SYNC: if (arg & NORMAL_SYNC) { frm_cfg.rec_delay = 1; frm_cfg.tr_delay = 1; - } - else if (arg & EARLY_SYNC) + } else if (arg & EARLY_SYNC) frm_cfg.rec_delay = frm_cfg.tr_delay = 0; - else if (arg & SECOND_WORD_SYNC) { + else if (arg & LATE_SYNC) { + frm_cfg.tr_delay = 2; + frm_cfg.rec_delay = 2; + } else if (arg & SECOND_WORD_SYNC) { frm_cfg.rec_delay = 7; frm_cfg.tr_delay = 1; } @@ -914,15 +1007,12 @@ static int sync_serial_ioctl(struct file *file, frm_cfg.type = regk_sser_level; frm_cfg.tr_delay = 1; frm_cfg.level = regk_sser_neg_lo; - if (arg & SPI_SLAVE) - { + if (arg & SPI_SLAVE) { rec_cfg.clk_pol = regk_sser_neg; gen_cfg.clk_dir = regk_sser_in; port->input = 1; port->output = 0; - } - else - { + } else { gen_cfg.out_clk_pol = regk_sser_pos; port->input = 0; port->output = 1; @@ -965,19 +1055,19 @@ static int sync_serial_ioctl(struct file *file, } static long sync_serial_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg) { - long ret; + long ret; - mutex_lock(&sync_serial_mutex); - ret = sync_serial_ioctl_unlocked(file, cmd, arg); - mutex_unlock(&sync_serial_mutex); + mutex_lock(&sync_serial_mutex); + ret = sync_serial_ioctl_unlocked(file, cmd, arg); + mutex_unlock(&sync_serial_mutex); - return ret; + return ret; } /* NOTE: sync_serial_write does not support concurrency */ -static ssize_t sync_serial_write(struct file *file, const char *buf, +static ssize_t sync_serial_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { int dev = iminor(file_inode(file)); @@ -993,7 +1083,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, unsigned char *buf_stop_ptr; /* Last byte + 1 */ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { - DEBUG(printk("Invalid minor %d\n", dev)); + DEBUG(pr_info("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; @@ -1006,9 +1096,9 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, * |_________|___________________|________________________| * ^ rd_ptr ^ wr_ptr */ - DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", - port->port_nbr, count, port->active_tr_descr, - port->catch_tr_descr)); + DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n", + port->port_nbr, count, port->active_tr_descr, + port->catch_tr_descr)); /* Read variables that may be updated by interrupts */ spin_lock_irqsave(&port->lock, flags); @@ -1020,7 +1110,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, if (port->tr_running && ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || out_buf_count >= OUT_BUFFER_SIZE)) { - DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); + 
DEBUGWRITE(pr_info("sser%d full\n", dev)); return -EAGAIN; } @@ -1043,15 +1133,16 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, if (copy_from_user(wr_ptr, buf, trunc_count)) return -EFAULT; - DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", - out_buf_count, trunc_count, - port->out_buf_count, port->out_buffer, - wr_ptr, buf_stop_ptr)); + DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n", + out_buf_count, trunc_count, + port->out_buf_count, port->out_buffer, + wr_ptr, buf_stop_ptr)); /* Make sure transmitter/receiver is running */ if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); - reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; rec_cfg.rec_en = port->input; REG_WR(sser, port->regi_sser, rw_cfg, cfg); @@ -1068,8 +1159,11 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, spin_lock_irqsave(&port->lock, flags); port->out_buf_count += trunc_count; if (port->use_dma) { +#ifdef SYNC_SER_DMA start_dma_out(port, wr_ptr, trunc_count); +#endif } else if (!port->tr_running) { +#ifdef SYNC_SER_MANUAL reg_sser_rw_intr_mask intr_mask; intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); /* Start sender by writing data */ @@ -1077,14 +1171,15 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, /* and enable transmitter ready IRQ */ intr_mask.trdy = 1; REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); +#endif } spin_unlock_irqrestore(&port->lock, flags); /* Exit if non blocking */ if (file->f_flags & O_NONBLOCK) { - DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", - port->port_nbr, trunc_count, - REG_RD_INT(dma, port->regi_dmaout, r_intr))); + DEBUGWRITE(pr_info("w d%d c %u %08x\n", + port->port_nbr, trunc_count, + REG_RD_INT(dma, port->regi_dmaout, r_intr))); return trunc_count; } @@ -1094,105 +1189,32 @@ static ssize_t sync_serial_write(struct file *file, const char *buf, if (signal_pending(current)) return -EINTR; - DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", - port->port_nbr, trunc_count)); + DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count)); return trunc_count; } -static ssize_t sync_serial_read(struct file * file, char * buf, +static ssize_t sync_serial_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int dev = iminor(file_inode(file)); - int avail; - sync_port *port; - unsigned char* start; - unsigned char* end; - unsigned long flags; - - if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) - { - DEBUG(printk("Invalid minor %d\n", dev)); - return -ENODEV; - } - port = &ports[dev]; - - DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size)); - - if (!port->started) - { - reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); - reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); - reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); - cfg.en = regk_sser_yes; - tr_cfg.tr_en = regk_sser_yes; - rec_cfg.rec_en = regk_sser_yes; - REG_WR(sser, port->regi_sser, rw_cfg, cfg); - REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); - REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); - port->started = 1; - } - - /* Calculate number of available bytes */ - /* Save pointers to avoid that they are modified by interrupt */ - spin_lock_irqsave(&port->lock, flags); - start = (unsigned 
char*)port->readp; /* cast away volatile */ - end = (unsigned char*)port->writep; /* cast away volatile */ - spin_unlock_irqrestore(&port->lock, flags); - while ((start == end) && !port->full) /* No data */ - { - DEBUGREAD(printk(KERN_DEBUG "&")); - if (file->f_flags & O_NONBLOCK) - return -EAGAIN; - - wait_event_interruptible(port->in_wait_q, - !(start == end && !port->full)); - if (signal_pending(current)) - return -EINTR; - - spin_lock_irqsave(&port->lock, flags); - start = (unsigned char*)port->readp; /* cast away volatile */ - end = (unsigned char*)port->writep; /* cast away volatile */ - spin_unlock_irqrestore(&port->lock, flags); - } - - /* Lazy read, never return wrapped data. */ - if (port->full) - avail = port->in_buffer_size; - else if (end > start) - avail = end - start; - else - avail = port->flip + port->in_buffer_size - start; - - count = count > avail ? avail : count; - if (copy_to_user(buf, start, count)) - return -EFAULT; - /* Disable interrupts while updating readp */ - spin_lock_irqsave(&port->lock, flags); - port->readp += count; - if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ - port->readp = port->flip; - port->full = 0; - spin_unlock_irqrestore(&port->lock, flags); - DEBUGREAD(printk("r %d\n", count)); - return count; + return __sync_serial_read(file, buf, count, ppos, NULL); } -static void send_word(sync_port* port) +#ifdef SYNC_SER_MANUAL +static void send_word(struct sync_port *port) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_tr_data tr_data = {0}; - switch(tr_cfg.sample_size) + switch (tr_cfg.sample_size) { + case 8: + port->out_buf_count--; + tr_data.data = *port->out_rd_ptr++; + REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); + if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) + port->out_rd_ptr = port->out_buffer; + break; + case 12: { - case 8: - port->out_buf_count--; - tr_data.data = *port->out_rd_ptr++; - REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); - if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) - port->out_rd_ptr = port->out_buffer; - break; - case 12: - { int data = (*port->out_rd_ptr++) << 8; data |= *port->out_rd_ptr++; port->out_buf_count -= 2; @@ -1200,8 +1222,8 @@ static void send_word(sync_port* port) REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; + break; } - break; case 16: port->out_buf_count -= 2; tr_data.data = *(unsigned short *)port->out_rd_ptr; @@ -1233,27 +1255,28 @@ static void send_word(sync_port* port) break; } } +#endif -static void start_dma_out(struct sync_port *port, - const char *data, int count) +#ifdef SYNC_SER_DMA +static void start_dma_out(struct sync_port *port, const char *data, int count) { - port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); + port->active_tr_descr->buf = (char *)virt_to_phys((char *)data); port->active_tr_descr->after = port->active_tr_descr->buf + count; port->active_tr_descr->intr = 1; port->active_tr_descr->eol = 1; port->prev_tr_descr->eol = 0; - DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", + DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n", port->prev_tr_descr, port->active_tr_descr)); port->prev_tr_descr = port->active_tr_descr; - port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); + port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next); if (!port->tr_running) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); - 
port->out_context.next = 0; + port->out_context.next = NULL; port->out_context.saved_data = (dma_descr_data *)virt_to_phys(port->prev_tr_descr); port->out_context.saved_data_buf = port->prev_tr_descr->buf; @@ -1263,57 +1286,58 @@ static void start_dma_out(struct sync_port *port, tr_cfg.tr_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); - DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); + DEBUGTRDMA(pr_info(KERN_INFO "dma s\n");); } else { DMA_CONTINUE_DATA(port->regi_dmaout); - DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); + DEBUGTRDMA(pr_info("dma c\n");); } port->tr_running = 1; } -static void start_dma_in(sync_port *port) +static void start_dma_in(struct sync_port *port) { int i; char *buf; + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); port->writep = port->flip; + spin_unlock_irqrestore(&port->lock, flags); - if (port->writep > port->flip + port->in_buffer_size) { - panic("Offset too large in sync serial driver\n"); - return; - } - buf = (char*)virt_to_phys(port->in_buffer); + buf = (char *)virt_to_phys(port->in_buffer); for (i = 0; i < NBR_IN_DESCR; i++) { port->in_descr[i].buf = buf; port->in_descr[i].after = buf + port->inbufchunk; port->in_descr[i].intr = 1; - port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]); + port->in_descr[i].next = + (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]); port->in_descr[i].buf = buf; buf += port->inbufchunk; } /* Link the last descriptor to the first */ - port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); + port->in_descr[i-1].next = + (dma_descr_data *)virt_to_phys(&port->in_descr[0]); port->in_descr[i-1].eol = regk_sser_yes; port->next_rx_desc = &port->in_descr[0]; port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; - port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); + port->in_context.saved_data = + (dma_descr_data *)virt_to_phys(&port->in_descr[0]); port->in_context.saved_data_buf = port->in_descr[0].buf; DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); } -#ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; - reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; + reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes }; reg_dma_rw_stat stat; int i; int found = 0; int stop_sser = 0; for (i = 0; i < NBR_PORTS; i++) { - sync_port *port = &ports[i]; - if (!port->enabled || !port->use_dma) + struct sync_port *port = &ports[i]; + if (!port->enabled || !port->use_dma) continue; /* IRQ active for the port? */ @@ -1338,19 +1362,20 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id) int sent; sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; - DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" - "in descr %p (ac: %p)\n", - port->out_buf_count, sent, - port->out_buf_count - sent, - port->catch_tr_descr, - port->active_tr_descr);); + DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t" + "in descr %p (ac: %p)\n", + port->out_buf_count, sent, + port->out_buf_count - sent, + port->catch_tr_descr, + port->active_tr_descr);); port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt((int) port->catch_tr_descr->next); port->out_rd_ptr = phys_to_virt((int) port->catch_tr_descr->buf); } else { - int i, sent; + reg_sser_rw_tr_cfg tr_cfg; + int j, sent; /* EOL handler. 
* Note that if an EOL was encountered during the irq * locked section of sync_ser_write the DMA will be @@ -1358,11 +1383,11 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id) * The remaining descriptors will be traversed by * the descriptor interrupts as usual. */ - i = 0; + j = 0; while (!port->catch_tr_descr->eol) { sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; - DEBUGOUTBUF(printk(KERN_DEBUG + DEBUGOUTBUF(pr_info( "traversing descr %p -%d (%d)\n", port->catch_tr_descr, sent, @@ -1370,16 +1395,15 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id) port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt( (int)port->catch_tr_descr->next); - i++; - if (i >= NBR_OUT_DESCR) { + j++; + if (j >= NBR_OUT_DESCR) { /* TODO: Reset and recover */ panic("sync_serial: missing eol"); } } sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; - DEBUGOUTBUF(printk(KERN_DEBUG - "eol at descr %p -%d (%d)\n", + DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n", port->catch_tr_descr, sent, port->out_buf_count)); @@ -1394,15 +1418,13 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id) OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; - reg_sser_rw_tr_cfg tr_cfg = - REG_RD(sser, port->regi_sser, rw_tr_cfg); - DEBUGTXINT(printk(KERN_DEBUG + tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); + DEBUGTXINT(pr_info( "tr_int DMA stop %d, set catch @ %p\n", port->out_buf_count, port->active_tr_descr)); if (port->out_buf_count != 0) - printk(KERN_CRIT "sync_ser: buffer not " - "empty after eol.\n"); + pr_err("sync_ser: buf not empty after eol\n"); port->catch_tr_descr = port->active_tr_descr; port->tr_running = 0; tr_cfg.tr_en = regk_sser_no; @@ -1414,62 +1436,79 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id) return IRQ_RETVAL(found); } /* tr_interrupt */ + +static inline void handle_rx_packet(struct sync_port *port) +{ + int idx; + reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes }; + unsigned long flags; + + DEBUGRXINT(pr_info(KERN_INFO "!")); + spin_lock_irqsave(&port->lock, flags); + + /* If we overrun the user experience is crap regardless if we + * drop new or old data. Its much easier to get it right when + * dropping new data so lets do that. + */ + if ((port->writep + port->inbufchunk <= + port->flip + port->in_buffer_size) && + (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) { + memcpy(port->writep, + phys_to_virt((unsigned)port->next_rx_desc->buf), + port->inbufchunk); + port->writep += port->inbufchunk; + if (port->writep >= port->flip + port->in_buffer_size) + port->writep = port->flip; + + /* Timestamp the new data chunk. 
*/ + if (port->write_ts_idx == NBR_IN_DESCR) + port->write_ts_idx = 0; + idx = port->write_ts_idx++; + do_posix_clock_monotonic_gettime(&port->timestamp[idx]); + port->in_buffer_len += port->inbufchunk; + } + spin_unlock_irqrestore(&port->lock, flags); + + port->next_rx_desc->eol = 1; + port->prev_rx_desc->eol = 0; + /* Cache bug workaround */ + flush_dma_descr(port->prev_rx_desc, 0); + port->prev_rx_desc = port->next_rx_desc; + port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); + /* Cache bug workaround */ + flush_dma_descr(port->prev_rx_desc, 1); + /* wake up the waiting process */ + wake_up_interruptible(&port->in_wait_q); + DMA_CONTINUE(port->regi_dmain); + REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); + +} + static irqreturn_t rx_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; - reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; int i; int found = 0; - for (i = 0; i < NBR_PORTS; i++) - { - sync_port *port = &ports[i]; + DEBUG(pr_info("rx_interrupt\n")); + + for (i = 0; i < NBR_PORTS; i++) { + struct sync_port *port = &ports[i]; - if (!port->enabled || !port->use_dma ) + if (!port->enabled || !port->use_dma) continue; masked = REG_RD(dma, port->regi_dmain, r_masked_intr); - if (masked.data) /* Descriptor interrupt */ - { - found = 1; - while (REG_RD(dma, port->regi_dmain, rw_data) != - virt_to_phys(port->next_rx_desc)) { - DEBUGRXINT(printk(KERN_DEBUG "!")); - if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) { - int first_size = port->flip + port->in_buffer_size - port->writep; - memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size); - memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size); - port->writep = port->flip + port->inbufchunk - first_size; - } else { - memcpy((char*)port->writep, - phys_to_virt((unsigned)port->next_rx_desc->buf), - port->inbufchunk); - port->writep += port->inbufchunk; - if (port->writep >= port->flip + port->in_buffer_size) - port->writep = port->flip; - } - if (port->writep == port->readp) - { - port->full = 1; - } - - port->next_rx_desc->eol = 1; - port->prev_rx_desc->eol = 0; - /* Cache bug workaround */ - flush_dma_descr(port->prev_rx_desc, 0); - port->prev_rx_desc = port->next_rx_desc; - port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); - /* Cache bug workaround */ - flush_dma_descr(port->prev_rx_desc, 1); - /* wake up the waiting process */ - wake_up_interruptible(&port->in_wait_q); - DMA_CONTINUE(port->regi_dmain); - REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); + if (!masked.data) + continue; - } - } + /* Descriptor interrupt */ + found = 1; + while (REG_RD(dma, port->regi_dmain, rw_data) != + virt_to_phys(port->next_rx_desc)) + handle_rx_packet(port); } return IRQ_RETVAL(found); } /* rx_interrupt */ @@ -1478,75 +1517,83 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id) #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id) { + unsigned long flags; int i; int found = 0; reg_sser_r_masked_intr masked; - for (i = 0; i < NBR_PORTS; i++) - { - sync_port *port = &ports[i]; + for (i = 0; i < NBR_PORTS; i++) { + struct sync_port *port = &ports[i]; if (!port->enabled || port->use_dma) - { continue; - } masked = REG_RD(sser, port->regi_sser, r_masked_intr); - if (masked.rdav) /* Data received? 
*/ - { - reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); - reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data); + /* Data received? */ + if (masked.rdav) { + reg_sser_rw_rec_cfg rec_cfg = + REG_RD(sser, port->regi_sser, rw_rec_cfg); + reg_sser_r_rec_data data = REG_RD(sser, + port->regi_sser, r_rec_data); found = 1; /* Read data */ - switch(rec_cfg.sample_size) - { + spin_lock_irqsave(&port->lock, flags); + switch (rec_cfg.sample_size) { case 8: *port->writep++ = data.data & 0xff; break; case 12: *port->writep = (data.data & 0x0ff0) >> 4; *(port->writep + 1) = data.data & 0x0f; - port->writep+=2; + port->writep += 2; break; case 16: - *(unsigned short*)port->writep = data.data; - port->writep+=2; + *(unsigned short *)port->writep = data.data; + port->writep += 2; break; case 24: - *(unsigned int*)port->writep = data.data; - port->writep+=3; + *(unsigned int *)port->writep = data.data; + port->writep += 3; break; case 32: - *(unsigned int*)port->writep = data.data; - port->writep+=4; + *(unsigned int *)port->writep = data.data; + port->writep += 4; break; } - if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */ + /* Wrap? */ + if (port->writep >= port->flip + port->in_buffer_size) port->writep = port->flip; if (port->writep == port->readp) { - /* receive buffer overrun, discard oldest data - */ + /* Receive buf overrun, discard oldest data */ port->readp++; - if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ + /* Wrap? */ + if (port->readp >= port->flip + + port->in_buffer_size) port->readp = port->flip; } + spin_unlock_irqrestore(&port->lock, flags); if (sync_data_avail(port) >= port->inbufchunk) - wake_up_interruptible(&port->in_wait_q); /* Wake up application */ + /* Wake up application */ + wake_up_interruptible(&port->in_wait_q); } - if (masked.trdy) /* Transmitter ready? */ - { + /* Transmitter ready? */ + if (masked.trdy) { found = 1; - if (port->out_buf_count > 0) /* More data to send */ + /* More data to send */ + if (port->out_buf_count > 0) send_word(port); - else /* transmission finished */ - { + else { + /* Transmission finished */ reg_sser_rw_intr_mask intr_mask; - intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); + intr_mask = REG_RD(sser, port->regi_sser, + rw_intr_mask); intr_mask.trdy = 0; - REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); - wake_up_interruptible(&port->out_wait_q); /* Wake up application */ + REG_WR(sser, port->regi_sser, + rw_intr_mask, intr_mask); + /* Wake up application */ + wake_up_interruptible(&port->out_wait_q); } } } @@ -1554,4 +1601,109 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id) } #endif +static int __init etrax_sync_serial_init(void) +{ +#if 1 + /* This code will be removed when we move to udev for all devices. */ + syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0); + if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) { + pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR); + return -1; + } +#else + /* Allocate dynamic major number. 
*/ + if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) { + pr_err("Failed to allocate character device region\n"); + return -1; + } +#endif + syncser_cdev = cdev_alloc(); + if (!syncser_cdev) { + pr_err("Failed to allocate cdev for syncser\n"); + unregister_chrdev_region(syncser_first, minor_count); + return -1; + } + cdev_init(syncser_cdev, &syncser_fops); + + /* Create a sysfs class for syncser */ + syncser_class = class_create(THIS_MODULE, "syncser_class"); + + /* Initialize Ports */ +#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) + if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) { + pr_warn("Unable to alloc pins for synchronous serial port 0\n"); + unregister_chrdev_region(syncser_first, minor_count); + return -EIO; + } + initialize_port(0); + ports[0].enabled = 1; + /* Register with sysfs so udev can pick it up. */ + device_create(syncser_class, NULL, syncser_first, NULL, + "%s%d", SYNCSER_NAME, 0); +#endif + +#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) + if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) { + pr_warn("Unable to alloc pins for synchronous serial port 1\n"); + unregister_chrdev_region(syncser_first, minor_count); + class_destroy(syncser_class); + return -EIO; + } + initialize_port(1); + ports[1].enabled = 1; + /* Register with sysfs so udev can pick it up. */ + device_create(syncser_class, NULL, syncser_first, NULL, + "%s%d", SYNCSER_NAME, 0); +#endif + + /* Add it to system */ + if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) { + pr_err("Failed to add syncser as char device\n"); + device_destroy(syncser_class, syncser_first); + class_destroy(syncser_class); + cdev_del(syncser_cdev); + unregister_chrdev_region(syncser_first, minor_count); + return -1; + } + + + pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n", + SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first)); + + return 0; +} + +static void __exit etrax_sync_serial_exit(void) +{ + int i; + device_destroy(syncser_class, syncser_first); + class_destroy(syncser_class); + + if (syncser_cdev) { + cdev_del(syncser_cdev); + unregister_chrdev_region(syncser_first, minor_count); + } + for (i = 0; i < NBR_PORTS; i++) { + struct sync_port *port = &ports[i]; + if (port->init_irqs == dma_irq_setup) { + /* Free dma irqs and dma channels. */ +#ifdef SYNC_SER_DMA + artpec_free_dma(port->dma_in_nbr); + artpec_free_dma(port->dma_out_nbr); + free_irq(port->dma_out_intr_vect, port); + free_irq(port->dma_in_intr_vect, port); +#endif + } else if (port->init_irqs == manual_irq_setup) { + /* Free manual irq. 
*/ + free_irq(port->syncser_intr_vect, port); + } + } + + pr_info("ARTPEC synchronous serial port unregistered\n"); +} + module_init(etrax_sync_serial_init); +module_exit(etrax_sync_serial_exit); + +MODULE_LICENSE("GPL"); + diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c index 610909b003f6..02e33ebe51ec 100644 --- a/arch/cris/arch-v32/kernel/debugport.c +++ b/arch/cris/arch-v32/kernel/debugport.c @@ -3,7 +3,9 @@ */ #include <linux/console.h> +#include <linux/kernel.h> #include <linux/init.h> +#include <linux/string.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/ser_defs.h> @@ -65,6 +67,7 @@ struct dbg_port ports[] = }, #endif }; + static struct dbg_port *port = #if defined(CONFIG_ETRAX_DEBUG_PORT0) &ports[0]; @@ -97,14 +100,19 @@ static struct dbg_port *kgdb_port = #endif #endif -static void -start_port(struct dbg_port* p) +static void start_port(struct dbg_port *p) { - if (!p) - return; + /* Set up serial port registers */ + reg_ser_rw_tr_ctrl tr_ctrl = {0}; + reg_ser_rw_tr_dma_en tr_dma_en = {0}; - if (p->started) + reg_ser_rw_rec_ctrl rec_ctrl = {0}; + reg_ser_rw_tr_baud_div tr_baud_div = {0}; + reg_ser_rw_rec_baud_div rec_baud_div = {0}; + + if (!p || p->started) return; + p->started = 1; if (p->nbr == 1) @@ -118,36 +126,24 @@ start_port(struct dbg_port* p) crisv32_pinmux_alloc_fixed(pinmux_ser4); #endif - /* Set up serial port registers */ - reg_ser_rw_tr_ctrl tr_ctrl = {0}; - reg_ser_rw_tr_dma_en tr_dma_en = {0}; - - reg_ser_rw_rec_ctrl rec_ctrl = {0}; - reg_ser_rw_tr_baud_div tr_baud_div = {0}; - reg_ser_rw_rec_baud_div rec_baud_div = {0}; - tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493; tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no; tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8; tr_ctrl.en = rec_ctrl.en = 1; - if (p->parity == 'O') - { + if (p->parity == 'O') { tr_ctrl.par_en = regk_ser_yes; tr_ctrl.par = regk_ser_odd; rec_ctrl.par_en = regk_ser_yes; rec_ctrl.par = regk_ser_odd; - } - else if (p->parity == 'E') - { + } else if (p->parity == 'E') { tr_ctrl.par_en = regk_ser_yes; tr_ctrl.par = regk_ser_even; rec_ctrl.par_en = regk_ser_yes; rec_ctrl.par = regk_ser_odd; } - if (p->bits == 7) - { + if (p->bits == 7) { tr_ctrl.data_bits = regk_ser_bits7; rec_ctrl.data_bits = regk_ser_bits7; } @@ -161,8 +157,7 @@ start_port(struct dbg_port* p) #ifdef CONFIG_ETRAX_KGDB /* Use polling to get a single character from the kernel debug port */ -int -getDebugChar(void) +int getDebugChar(void) { reg_ser_rs_stat_din stat; reg_ser_rw_ack_intr ack_intr = { 0 }; @@ -179,8 +174,7 @@ getDebugChar(void) } /* Use polling to put a single character to the kernel debug port */ -void -putDebugChar(int val) +void putDebugChar(int val) { reg_ser_r_stat_din stat; do { @@ -190,12 +184,48 @@ putDebugChar(int val) } #endif /* CONFIG_ETRAX_KGDB */ +static void __init early_putch(int c) +{ + reg_ser_r_stat_din stat; + /* Wait until transmitter is ready and send. */ + do + stat = REG_RD(ser, port->instance, r_stat_din); + while (!stat.tr_rdy); + REG_WR_INT(ser, port->instance, rw_dout, c); +} + +static void __init +early_console_write(struct console *con, const char *s, unsigned n) +{ + extern void reset_watchdog(void); + int i; + + /* Send data. */ + for (i = 0; i < n; i++) { + /* TODO: the '\n' -> '\n\r' translation should be done at the + receiver. Remove it when the serial driver removes it. 
*/ + if (s[i] == '\n') + early_putch('\r'); + early_putch(s[i]); + reset_watchdog(); + } +} + +static struct console early_console_dev __initdata = { + .name = "early", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1 +}; + /* Register console for printk's, etc. */ -int __init -init_etrax_debug(void) +int __init init_etrax_debug(void) { start_port(port); + /* Register an early console if a debug port was chosen. */ + register_console(&early_console_dev); + #ifdef CONFIG_ETRAX_KGDB start_port(kgdb_port); #endif /* CONFIG_ETRAX_KGDB */ diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index ee66866538f8..eb74dabbeb96 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/threads.h> #include <linux/cpufreq.h> +#include <linux/mm.h> #include <asm/types.h> #include <asm/signal.h> #include <asm/io.h> @@ -56,7 +57,6 @@ static int __init etrax_init_cont_rotime(void) } arch_initcall(etrax_init_cont_rotime); - unsigned long timer_regs[NR_CPUS] = { regi_timer0, @@ -68,9 +68,8 @@ unsigned long timer_regs[NR_CPUS] = extern int set_rtc_mmss(unsigned long nowtime); #ifdef CONFIG_CPU_FREQ -static int -cris_time_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data); +static int cris_time_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); static struct notifier_block cris_time_freq_notifier_block = { .notifier_call = cris_time_freq_notifier, @@ -87,7 +86,6 @@ unsigned long get_ns_in_jiffie(void) return ns; } - /* From timer MDS describing the hardware watchdog: * 4.3.1 Watchdog Operation * The watchdog timer is an 8-bit timer with a configurable start value. @@ -109,11 +107,18 @@ static short int watchdog_key = 42; /* arbitrary 7 bit number */ * is used though, so set this really low. */ #define WATCHDOG_MIN_FREE_PAGES 8 +/* for reliable NICE_DOGGY behaviour */ +static int bite_in_progress; + void reset_watchdog(void) { #if defined(CONFIG_ETRAX_WATCHDOG) reg_timer_rw_wd_ctrl wd_ctrl = { 0 }; +#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY) + if (unlikely(bite_in_progress)) + return; +#endif /* Only keep watchdog happy as long as we have memory left! */ if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) { /* Reset the watchdog with the inverse of the old key */ @@ -148,7 +153,9 @@ void handle_watchdog_bite(struct pt_regs *regs) #if defined(CONFIG_ETRAX_WATCHDOG) extern int cause_of_death; + nmi_enter(); oops_in_progress = 1; + bite_in_progress = 1; printk(KERN_WARNING "Watchdog bite\n"); /* Check if forced restart or unexpected watchdog */ @@ -170,6 +177,7 @@ void handle_watchdog_bite(struct pt_regs *regs) printk(KERN_WARNING "Oops: bitten by watchdog\n"); show_registers(regs); oops_in_progress = 0; + printk("\n"); /* Flush mtdoops. */ #ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY reset_watchdog(); #endif @@ -202,7 +210,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id) /* Reset watchdog otherwise it resets us! */ reset_watchdog(); - /* Update statistics. */ + /* Update statistics. */ update_process_times(user_mode(regs)); cris_do_profile(regs); /* Save profiling information */ @@ -213,7 +221,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id) /* Call the real timer interrupt handler */ xtime_update(1); - return IRQ_HANDLED; + return IRQ_HANDLED; } /* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. 
*/ @@ -293,14 +301,13 @@ void __init time_init(void) #ifdef CONFIG_CPU_FREQ cpufreq_register_notifier(&cris_time_freq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); + CPUFREQ_TRANSITION_NOTIFIER); #endif } #ifdef CONFIG_CPU_FREQ -static int -cris_time_freq_notifier(struct notifier_block *nb, unsigned long val, - void *data) +static int cris_time_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) { struct cpufreq_freqs *freqs = data; if (val == CPUFREQ_POSTCHANGE) { diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c index 0b5b70d5f58a..f0f335d8aa79 100644 --- a/arch/cris/arch-v32/lib/usercopy.c +++ b/arch/cris/arch-v32/lib/usercopy.c @@ -26,8 +26,7 @@ /* Copy to userspace. This is based on the memcpy used for kernel-to-kernel copying; see "string.c". */ -unsigned long -__copy_user (void __user *pdst, const void *psrc, unsigned long pn) +unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. @@ -155,13 +154,13 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn) return retn; } +EXPORT_SYMBOL(__copy_user); /* Copy from user to kernel, zeroing the bytes that were inaccessible in userland. The return-value is the number of bytes that were inaccessible. */ - -unsigned long -__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) +unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. @@ -321,11 +320,10 @@ copy_exception_bytes: return retn + n; } +EXPORT_SYMBOL(__copy_user_zeroing); /* Zero userspace. */ - -unsigned long -__do_clear_user (void __user *pto, unsigned long pn) +unsigned long __do_clear_user(void __user *pto, unsigned long pn) { /* We want the parameters put in special registers. Make sure the compiler is able to make something useful of this. 
@@ -468,3 +466,4 @@ __do_clear_user (void __user *pto, unsigned long pn) return retn; } +EXPORT_SYMBOL(__do_clear_user); diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c index 38f29eec14a6..05a04708b8eb 100644 --- a/arch/cris/arch-v32/mach-fs/pinmux.c +++ b/arch/cris/arch-v32/mach-fs/pinmux.c @@ -26,7 +26,29 @@ static DEFINE_SPINLOCK(pinmux_lock); static void crisv32_pinmux_set(int port); -int crisv32_pinmux_init(void) +static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin, + enum pin_mode mode) +{ + int i; + + for (i = first_pin; i <= last_pin; i++) { + if ((pins[port][i] != pinmux_none) + && (pins[port][i] != pinmux_gpio) + && (pins[port][i] != mode)) { +#ifdef DEBUG + panic("Pinmux alloc failed!\n"); +#endif + return -EPERM; + } + } + + for (i = first_pin; i <= last_pin; i++) + pins[port][i] = mode; + + crisv32_pinmux_set(port); +} + +static int crisv32_pinmux_init(void) { static int initialized; @@ -37,20 +59,20 @@ int crisv32_pinmux_init(void) pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 = pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes; REG_WR(pinmux, regi_pinmux, rw_pa, pa); - crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio); - crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio); - crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio); - crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio); + __crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio); + __crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio); + __crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio); + __crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio); } return 0; } -int -crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode) +int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, + enum pin_mode mode) { - int i; unsigned long flags; + int ret; crisv32_pinmux_init(); @@ -59,26 +81,11 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode) spin_lock_irqsave(&pinmux_lock, flags); - for (i = first_pin; i <= last_pin; i++) { - if ((pins[port][i] != pinmux_none) - && (pins[port][i] != pinmux_gpio) - && (pins[port][i] != mode)) { - spin_unlock_irqrestore(&pinmux_lock, flags); -#ifdef DEBUG - panic("Pinmux alloc failed!\n"); -#endif - return -EPERM; - } - } - - for (i = first_pin; i <= last_pin; i++) - pins[port][i] = mode; - - crisv32_pinmux_set(port); + ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode); spin_unlock_irqrestore(&pinmux_lock, flags); - return 0; + return ret; } int crisv32_pinmux_alloc_fixed(enum fixed_function function) @@ -98,58 +105,58 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function) switch (function) { case pinmux_ser1: - ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed); hwprot.ser1 = regk_pinmux_yes; break; case pinmux_ser2: - ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed); hwprot.ser2 = regk_pinmux_yes; break; case pinmux_ser3: - ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed); hwprot.ser3 = regk_pinmux_yes; break; case pinmux_sser0: - ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); hwprot.sser0 = regk_pinmux_yes; 
break; case pinmux_sser1: - ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); hwprot.sser1 = regk_pinmux_yes; break; case pinmux_ata0: - ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed); hwprot.ata0 = regk_pinmux_yes; break; case pinmux_ata1: - ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed); hwprot.ata1 = regk_pinmux_yes; break; case pinmux_ata2: - ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed); hwprot.ata2 = regk_pinmux_yes; break; case pinmux_ata3: - ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed); hwprot.ata2 = regk_pinmux_yes; break; case pinmux_ata: - ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed); - ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed); + ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed); hwprot.ata = regk_pinmux_yes; break; case pinmux_eth1: - ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed); hwprot.eth1 = regk_pinmux_yes; hwprot.eth1_mgm = regk_pinmux_yes; break; case pinmux_timer: - ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); + ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); hwprot.timer = regk_pinmux_yes; spin_unlock_irqrestore(&pinmux_lock, flags); return ret; @@ -188,9 +195,19 @@ void crisv32_pinmux_set(int port) #endif } -int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) +static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) { int i; + + for (i = first_pin; i <= last_pin; i++) + pins[port][i] = pinmux_none; + + crisv32_pinmux_set(port); + return 0; +} + +int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) +{ unsigned long flags; crisv32_pinmux_init(); @@ -199,11 +216,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) return -EINVAL; spin_lock_irqsave(&pinmux_lock, flags); - - for (i = first_pin; i <= last_pin; i++) - pins[port][i] = pinmux_none; - - crisv32_pinmux_set(port); + __crisv32_pinmux_dealloc(port, first_pin, last_pin); spin_unlock_irqrestore(&pinmux_lock, flags); return 0; @@ -226,58 +239,58 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function) switch (function) { case pinmux_ser1: - ret = crisv32_pinmux_dealloc(PORT_C, 4, 7); + ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7); hwprot.ser1 = regk_pinmux_no; break; case pinmux_ser2: - ret = crisv32_pinmux_dealloc(PORT_C, 8, 11); + ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11); hwprot.ser2 = regk_pinmux_no; break; case pinmux_ser3: - ret = crisv32_pinmux_dealloc(PORT_C, 12, 15); + ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15); hwprot.ser3 = regk_pinmux_no; break; case pinmux_sser0: - ret = crisv32_pinmux_dealloc(PORT_C, 0, 3); - ret |= 
crisv32_pinmux_dealloc(PORT_C, 16, 16); + ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3); + ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16); hwprot.sser0 = regk_pinmux_no; break; case pinmux_sser1: - ret = crisv32_pinmux_dealloc(PORT_D, 0, 4); + ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4); hwprot.sser1 = regk_pinmux_no; break; case pinmux_ata0: - ret = crisv32_pinmux_dealloc(PORT_D, 5, 7); - ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17); + ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7); + ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17); hwprot.ata0 = regk_pinmux_no; break; case pinmux_ata1: - ret = crisv32_pinmux_dealloc(PORT_D, 0, 4); - ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17); + ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4); + ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17); hwprot.ata1 = regk_pinmux_no; break; case pinmux_ata2: - ret = crisv32_pinmux_dealloc(PORT_C, 11, 15); - ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3); + ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15); + ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3); hwprot.ata2 = regk_pinmux_no; break; case pinmux_ata3: - ret = crisv32_pinmux_dealloc(PORT_C, 8, 10); - ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2); + ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10); + ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2); hwprot.ata2 = regk_pinmux_no; break; case pinmux_ata: - ret = crisv32_pinmux_dealloc(PORT_B, 0, 15); - ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15); + ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15); + ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15); hwprot.ata = regk_pinmux_no; break; case pinmux_eth1: - ret = crisv32_pinmux_dealloc(PORT_E, 0, 17); + ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17); hwprot.eth1 = regk_pinmux_no; hwprot.eth1_mgm = regk_pinmux_no; break; case pinmux_timer: - ret = crisv32_pinmux_dealloc(PORT_C, 16, 16); + ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16); hwprot.timer = regk_pinmux_no; spin_unlock_irqrestore(&pinmux_lock, flags); return ret; @@ -293,7 +306,8 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function) return ret; } -void crisv32_pinmux_dump(void) +#ifdef DEBUG +static void crisv32_pinmux_dump(void) { int i, j; @@ -305,5 +319,5 @@ void crisv32_pinmux_dump(void) printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]); } } - +#endif __initcall(crisv32_pinmux_init); diff --git a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h index c2b3036779df..09bf0c90d2d3 100644 --- a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h +++ b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h @@ -28,11 +28,9 @@ enum fixed_function { pinmux_timer }; -int crisv32_pinmux_init(void); int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode); int crisv32_pinmux_alloc_fixed(enum fixed_function function); int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin); int crisv32_pinmux_dealloc_fixed(enum fixed_function function); -void crisv32_pinmux_dump(void); #endif diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index d5f124832fd1..889f2de050a3 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -1,8 +1,4 @@ -header-y += arch-v10/ -header-y += arch-v32/ - - generic-y += barrier.h generic-y += clkdev.h generic-y += cputime.h diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild index 7d47b366ad82..01f66b8f15e5 100644 --- a/arch/cris/include/uapi/asm/Kbuild +++ b/arch/cris/include/uapi/asm/Kbuild @@ -1,8 +1,8 @@ # UAPI Header export list include 
include/uapi/asm-generic/Kbuild.asm -header-y += arch-v10/ -header-y += arch-v32/ +header-y += ../arch-v10/arch/ +header-y += ../arch-v32/arch/ header-y += auxvec.h header-y += bitsperlong.h header-y += byteorder.h diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c index 5868cee20ebd..3908b942fd4c 100644 --- a/arch/cris/kernel/crisksyms.c +++ b/arch/cris/kernel/crisksyms.c @@ -47,16 +47,16 @@ EXPORT_SYMBOL(__negdi2); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); -/* Userspace access functions */ -EXPORT_SYMBOL(__copy_user_zeroing); -EXPORT_SYMBOL(__copy_user); - #undef memcpy #undef memset extern void * memset(void *, int, __kernel_size_t); extern void * memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); +#ifdef CONFIG_ETRAX_ARCH_V32 +#undef strcmp +EXPORT_SYMBOL(strcmp); +#endif #ifdef CONFIG_ETRAX_FAST_TIMER /* Fast timer functions */ @@ -66,3 +66,4 @@ EXPORT_SYMBOL(del_fast_timer); EXPORT_SYMBOL(schedule_usleep); #endif EXPORT_SYMBOL(csum_partial); +EXPORT_SYMBOL(csum_partial_copy_from_user); diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c index 0ffda73734f5..da4c72401e27 100644 --- a/arch/cris/kernel/traps.c +++ b/arch/cris/kernel/traps.c @@ -14,6 +14,10 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/utsname.h> +#ifdef CONFIG_KALLSYMS +#include <linux/kallsyms.h> +#endif #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -34,25 +38,24 @@ static int kstack_depth_to_print = 24; void (*nmi_handler)(struct pt_regs *); -void -show_trace(unsigned long *stack) +void show_trace(unsigned long *stack) { unsigned long addr, module_start, module_end; extern char _stext, _etext; int i; - printk("\nCall Trace: "); + pr_err("\nCall Trace: "); i = 1; module_start = VMALLOC_START; module_end = VMALLOC_END; - while (((long)stack & (THREAD_SIZE-1)) != 0) { + while (((long)stack & (THREAD_SIZE - 1)) != 0) { if (__get_user(addr, stack)) { /* This message matches "failing address" marked s390 in ksymoops, so lines containing it will not be filtered out by ksymoops. */ - printk("Failing address 0x%lx\n", (unsigned long)stack); + pr_err("Failing address 0x%lx\n", (unsigned long)stack); break; } stack++; @@ -68,10 +71,14 @@ show_trace(unsigned long *stack) if (((addr >= (unsigned long)&_stext) && (addr <= (unsigned long)&_etext)) || ((addr >= module_start) && (addr <= module_end))) { +#ifdef CONFIG_KALLSYMS + print_ip_sym(addr); +#else if (i && ((i % 8) == 0)) - printk("\n "); - printk("[<%08lx>] ", addr); + pr_err("\n "); + pr_err("[<%08lx>] ", addr); i++; +#endif } } } @@ -111,21 +118,21 @@ show_stack(struct task_struct *task, unsigned long *sp) stack = sp; - printk("\nStack from %08lx:\n ", (unsigned long)stack); + pr_err("\nStack from %08lx:\n ", (unsigned long)stack); for (i = 0; i < kstack_depth_to_print; i++) { if (((long)stack & (THREAD_SIZE-1)) == 0) break; if (i && ((i % 8) == 0)) - printk("\n "); + pr_err("\n "); if (__get_user(addr, stack)) { /* This message matches "failing address" marked s390 in ksymoops, so lines containing it will not be filtered out by ksymoops. 
*/ - printk("Failing address 0x%lx\n", (unsigned long)stack); + pr_err("Failing address 0x%lx\n", (unsigned long)stack); break; } stack++; - printk("%08lx ", addr); + pr_err("%08lx ", addr); } show_trace(sp); } @@ -139,33 +146,32 @@ show_stack(void) unsigned long *sp = (unsigned long *)rdusp(); int i; - printk("Stack dump [0x%08lx]:\n", (unsigned long)sp); + pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp); for (i = 0; i < 16; i++) - printk("sp + %d: 0x%08lx\n", i*4, sp[i]); + pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]); return 0; } #endif -void -set_nmi_handler(void (*handler)(struct pt_regs *)) +void set_nmi_handler(void (*handler)(struct pt_regs *)) { nmi_handler = handler; arch_enable_nmi(); } #ifdef CONFIG_DEBUG_NMI_OOPS -void -oops_nmi_handler(struct pt_regs *regs) +void oops_nmi_handler(struct pt_regs *regs) { stop_watchdog(); oops_in_progress = 1; - printk("NMI!\n"); + pr_err("NMI!\n"); show_registers(regs); oops_in_progress = 0; + oops_exit(); + pr_err("\n"); /* Flush mtdoops. */ } -static int __init -oops_nmi_register(void) +static int __init oops_nmi_register(void) { set_nmi_handler(oops_nmi_handler); return 0; @@ -180,8 +186,7 @@ __initcall(oops_nmi_register); * similar to an Oops dump, and if the kernel is configured to be a nice * doggy, then halt instead of reboot. */ -void -watchdog_bite_hook(struct pt_regs *regs) +void watchdog_bite_hook(struct pt_regs *regs) { #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY local_irq_disable(); @@ -196,8 +201,7 @@ watchdog_bite_hook(struct pt_regs *regs) } /* This is normally the Oops function. */ -void -die_if_kernel(const char *str, struct pt_regs *regs, long err) +void die_if_kernel(const char *str, struct pt_regs *regs, long err) { if (user_mode(regs)) return; @@ -211,13 +215,17 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) stop_watchdog(); #endif + oops_enter(); handle_BUG(regs); - printk("%s: %04lx\n", str, err & 0xffff); + pr_err("Linux %s %s\n", utsname()->release, utsname()->version); + pr_err("%s: %04lx\n", str, err & 0xffff); show_registers(regs); + oops_exit(); oops_in_progress = 0; + pr_err("\n"); /* Flush mtdoops. */ #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY reset_watchdog(); @@ -225,8 +233,7 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) do_exit(SIGSEGV); } -void __init -trap_init(void) +void __init trap_init(void) { /* Nothing needs to be done */ } diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c index c81af5bd9167..1e7fd45b60f8 100644 --- a/arch/cris/mm/init.c +++ b/arch/cris/mm/init.c @@ -11,13 +11,15 @@ #include <linux/gfp.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/proc_fs.h> +#include <linux/kcore.h> #include <asm/tlb.h> #include <asm/sections.h> unsigned long empty_zero_page; +EXPORT_SYMBOL(empty_zero_page); -void __init -mem_init(void) +void __init mem_init(void) { BUG_ON(!mem_map); @@ -31,10 +33,36 @@ mem_init(void) mem_init_print_info(NULL); } -/* free the pages occupied by initialization code */ +/* Free a range of init pages. Virtual addresses. */ -void -free_initmem(void) +void free_init_pages(const char *what, unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + free_page(addr); + totalram_pages++; + } + + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); +} + +/* Free the pages occupied by initialization code. 
*/ + +void free_initmem(void) { free_initmem_default(-1); } + +/* Free the pages occupied by initrd code. */ + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_init_pages("initrd memory", + start, + end); +} +#endif diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c index f9ca44bdea20..80fdb995a8ce 100644 --- a/arch/cris/mm/ioremap.c +++ b/arch/cris/mm/ioremap.c @@ -76,10 +76,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l * Must be freed with iounmap. */ -void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) +void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) { return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0); } +EXPORT_SYMBOL(ioremap_nocache); void iounmap(volatile void __iomem *addr) { diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h index 263511719a4a..69952c184207 100644 --- a/arch/hexagon/include/asm/cache.h +++ b/arch/hexagon/include/asm/cache.h @@ -1,7 +1,7 @@ /* * Cache definitions for the Hexagon architecture * - * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,6 +25,8 @@ #define L1_CACHE_SHIFT (5) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + #define __cacheline_aligned __aligned(L1_CACHE_BYTES) #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h index 49e0896ec240..b86f9f300e94 100644 --- a/arch/hexagon/include/asm/cacheflush.h +++ b/arch/hexagon/include/asm/cacheflush.h @@ -21,10 +21,7 @@ #ifndef _ASM_CACHEFLUSH_H #define _ASM_CACHEFLUSH_H -#include <linux/cache.h> -#include <linux/mm.h> -#include <asm/string.h> -#include <asm-generic/cacheflush.h> +#include <linux/mm_types.h> /* Cache flushing: * @@ -41,6 +38,20 @@ #define LINESIZE 32 #define LINEBITS 5 +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_page(vma, pg) do { } while (0) +#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + /* * Flush Dcache range through current map. */ @@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end); /* * Flush Icache range through current map. */ -#undef flush_icache_range extern void flush_icache_range(unsigned long start, unsigned long end); /* @@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, /* generic_ptrace_pokedata doesn't wind up here, does it? 
*/ } -#undef copy_to_user_page -static inline void copy_to_user_page(struct vm_area_struct *vma, - struct page *page, - unsigned long vaddr, - void *dst, void *src, int len) -{ - memcpy(dst, src, len); - if (vma->vm_flags & VM_EXEC) { - flush_icache_range((unsigned long) dst, - (unsigned long) dst + len); - } -} +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, void *src, int len); +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end); extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end); diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 70298996e9b2..66f5e9a61efc 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -24,14 +24,9 @@ #ifdef __KERNEL__ #include <linux/types.h> -#include <linux/delay.h> -#include <linux/vmalloc.h> -#include <asm/string.h> -#include <asm/mem-layout.h> #include <asm/iomap.h> #include <asm/page.h> #include <asm/cacheflush.h> -#include <asm/tlbflush.h> /* * We don't have PCI yet. diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c index 0e7c1dbb37b2..6981949f5df3 100644 --- a/arch/hexagon/kernel/setup.c +++ b/arch/hexagon/kernel/setup.c @@ -19,6 +19,7 @@ */ #include <linux/init.h> +#include <linux/delay.h> #include <linux/bootmem.h> #include <linux/mmzone.h> #include <linux/mm.h> diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 7858663352b9..110dab152f82 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -1,7 +1,7 @@ /* * Kernel traps/events for Hexagon processor * - * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -423,7 +423,7 @@ void do_trap0(struct pt_regs *regs) */ info.si_code = TRAP_BRKPT; info.si_addr = (void __user *) pt_elr(regs); - send_sig_info(SIGTRAP, &info, current); + force_sig_info(SIGTRAP, &info, current); } else { #ifdef CONFIG_KGDB kgdb_handle_exception(pt_cause(regs), SIGTRAP, diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 44d8c47bae2f..5f268c1071b3 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -1,7 +1,7 @@ /* * Linker script for Hexagon kernel * - * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -59,7 +59,7 @@ SECTIONS INIT_DATA_SECTION(PAGE_SIZE) _sdata = .; - RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE) + RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE) RO_DATA_SECTION(PAGE_SIZE) _edata = .; diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c index 0c76c802e31c..a7c6d827d8b6 100644 --- a/arch/hexagon/mm/cache.c +++ b/arch/hexagon/mm/cache.c @@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void) local_irq_restore(flags); mb(); } + +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, void *src, int len) +{ + memcpy(dst, src, len); + if (vma->vm_flags & VM_EXEC) { + flush_icache_range((unsigned long) dst, + (unsigned long) dst + len); + } +} diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c index 5905fd5f97f6..d27d67224046 100644 --- a/arch/hexagon/mm/ioremap.c +++ b/arch/hexagon/mm/ioremap.c @@ -20,6 +20,7 @@ #include <linux/io.h> #include <linux/vmalloc.h> +#include <linux/mm.h> void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) { diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 536d13b0bea6..074e52bf815c 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -20,7 +20,6 @@ config IA64 select HAVE_DYNAMIC_FTRACE if (!ITANIUM) select HAVE_FUNCTION_TRACER select HAVE_DMA_ATTRS - select HAVE_KVM select TTY select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG @@ -232,7 +231,7 @@ config IA64_SGI_UV config IA64_HP_SIM bool "Ski-simulator" select SWIOTLB - depends on !PM_RUNTIME + depends on !PM endchoice @@ -640,8 +639,6 @@ source "security/Kconfig" source "crypto/Kconfig" -source "arch/ia64/kvm/Kconfig" - source "lib/Kconfig" config IOMMU_HELPER diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 5441b14994fc..970d0bd99621 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ -core-$(CONFIG_KVM) += arch/ia64/kvm/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h deleted file mode 100644 index 4729752b7256..000000000000 --- a/arch/ia64/include/asm/kvm_host.h +++ /dev/null @@ -1,609 +0,0 @@ -/* - * kvm_host.h: used for kvm module, and hold ia64-specific sections. - * - * Copyright (C) 2007, Intel Corporation. - * - * Xiantao Zhang <xiantao.zhang@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- * - */ - -#ifndef __ASM_KVM_HOST_H -#define __ASM_KVM_HOST_H - -#define KVM_USER_MEM_SLOTS 32 - -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS - -/* define exit reasons from vmm to kvm*/ -#define EXIT_REASON_VM_PANIC 0 -#define EXIT_REASON_MMIO_INSTRUCTION 1 -#define EXIT_REASON_PAL_CALL 2 -#define EXIT_REASON_SAL_CALL 3 -#define EXIT_REASON_SWITCH_RR6 4 -#define EXIT_REASON_VM_DESTROY 5 -#define EXIT_REASON_EXTERNAL_INTERRUPT 6 -#define EXIT_REASON_IPI 7 -#define EXIT_REASON_PTC_G 8 -#define EXIT_REASON_DEBUG 20 - -/*Define vmm address space and vm data space.*/ -#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20) -#define KVM_VMM_SHIFT 24 -#define KVM_VMM_BASE 0xD000000000000000 -#define VMM_SIZE (__IA64_UL_CONST(8)<<20) - -/* - * Define vm_buffer, used by PAL Services, base address. - * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M - */ -#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) -#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20) - -/* - * kvm guest's data area looks as follow: - * - * +----------------------+ ------- KVM_VM_DATA_SIZE - * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET - * | | | / | - * | .......... | | /vcpu's struct&stack | - * | .......... | | /---------------------|---- 0 - * | vcpu[5]'s data | | / vpd | - * | vcpu[4]'s data | |/-----------------------| - * | vcpu[3]'s data | / vtlb | - * | vcpu[2]'s data | /|------------------------| - * | vcpu[1]'s data |/ | vhpt | - * | vcpu[0]'s data |____________________________| - * +----------------------+ | - * | memory dirty log | | - * +----------------------+ | - * | vm's data struct | | - * +----------------------+ | - * | | | - * | | | - * | | | - * | | | - * | | | - * | | | - * | | | - * | vm's p2m table | | - * | | | - * | | | - * | | | | - * vm's data->| | | | - * +----------------------+ ------- 0 - * To support large memory, needs to increase the size of p2m. - * To support more vcpus, needs to ensure it has enough space to - * hold vcpus' data. 
- */ - -#define KVM_VM_DATA_SHIFT 26 -#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT) -#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE) - -#define KVM_P2M_BASE KVM_VM_DATA_BASE -#define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20) - -#define VHPT_SHIFT 16 -#define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT) -#define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5)) - -#define VTLB_SHIFT 16 -#define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT) -#define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5)) - -#define VPD_SHIFT 16 -#define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT) - -#define VCPU_STRUCT_SHIFT 16 -#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) - -/* - * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h - */ -#define KVM_STK_SHIFT 16 -#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT) - -#define KVM_VM_STRUCT_SHIFT 19 -#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) - -#define KVM_MEM_DIRY_LOG_SHIFT 19 -#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT) - -#ifndef __ASSEMBLY__ - -/*Define the max vcpus and memory for Guests.*/ -#define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\ - KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data) -#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT) - -#define VMM_LOG_LEN 256 - -#include <linux/types.h> -#include <linux/mm.h> -#include <linux/kvm.h> -#include <linux/kvm_para.h> -#include <linux/kvm_types.h> - -#include <asm/pal.h> -#include <asm/sal.h> -#include <asm/page.h> - -struct kvm_vcpu_data { - char vcpu_vhpt[VHPT_SIZE]; - char vcpu_vtlb[VTLB_SIZE]; - char vcpu_vpd[VPD_SIZE]; - char vcpu_struct[VCPU_STRUCT_SIZE]; -}; - -struct kvm_vm_data { - char kvm_p2m[KVM_P2M_SIZE]; - char kvm_vm_struct[KVM_VM_STRUCT_SIZE]; - char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; - struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; -}; - -#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, vcpu_data[n])) -#define KVM_VM_BASE (KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, kvm_vm_struct)) -#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, kvm_mem_dirty_log) - -#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt)) -#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb)) -#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd)) -#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \ - offsetof(struct kvm_vcpu_data, vcpu_struct)) - -/*IO section definitions*/ -#define IOREQ_READ 1 -#define IOREQ_WRITE 0 - -#define STATE_IOREQ_NONE 0 -#define STATE_IOREQ_READY 1 -#define STATE_IOREQ_INPROCESS 2 -#define STATE_IORESP_READY 3 - -/*Guest Physical address layout.*/ -#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ -#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ -#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ -#define GPFN_PIB (3UL << 60) /* PIB base */ -#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ -#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ -#define GPFN_GFW (6UL << 60) /* Guest Firmware */ -#define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ - -#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ -#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ -#define INVALID_MFN (~0UL) -#define MEM_G (1UL << 30) -#define MEM_M (1UL << 20) -#define MMIO_START (3 * MEM_G) -#define MMIO_SIZE (512 * MEM_M) -#define 
VGA_IO_START 0xA0000UL -#define VGA_IO_SIZE 0x20000 -#define LEGACY_IO_START (MMIO_START + MMIO_SIZE) -#define LEGACY_IO_SIZE (64 * MEM_M) -#define IO_SAPIC_START 0xfec00000UL -#define IO_SAPIC_SIZE 0x100000 -#define PIB_START 0xfee00000UL -#define PIB_SIZE 0x200000 -#define GFW_START (4 * MEM_G - 16 * MEM_M) -#define GFW_SIZE (16 * MEM_M) - -/*Deliver mode, defined for ioapic.c*/ -#define dest_Fixed IOSAPIC_FIXED -#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY - -#define NMI_VECTOR 2 -#define ExtINT_VECTOR 0 -#define NULL_VECTOR (-1) -#define IA64_SPURIOUS_INT_VECTOR 0x0f - -#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24) - -/* - *Delivery mode - */ -#define SAPIC_DELIV_SHIFT 8 -#define SAPIC_FIXED 0x0 -#define SAPIC_LOWEST_PRIORITY 0x1 -#define SAPIC_PMI 0x2 -#define SAPIC_NMI 0x4 -#define SAPIC_INIT 0x5 -#define SAPIC_EXTINT 0x7 - -/* - * vcpu->requests bit members for arch - */ -#define KVM_REQ_PTC_G 32 -#define KVM_REQ_RESUME 33 - -struct kvm_mmio_req { - uint64_t addr; /* physical address */ - uint64_t size; /* size in bytes */ - uint64_t data; /* data (or paddr of data) */ - uint8_t state:4; - uint8_t dir:1; /* 1=read, 0=write */ -}; - -/*Pal data struct */ -struct kvm_pal_call{ - /*In area*/ - uint64_t gr28; - uint64_t gr29; - uint64_t gr30; - uint64_t gr31; - /*Out area*/ - struct ia64_pal_retval ret; -}; - -/* Sal data structure */ -struct kvm_sal_call{ - /*In area*/ - uint64_t in0; - uint64_t in1; - uint64_t in2; - uint64_t in3; - uint64_t in4; - uint64_t in5; - uint64_t in6; - uint64_t in7; - struct sal_ret_values ret; -}; - -/*Guest change rr6*/ -struct kvm_switch_rr6 { - uint64_t old_rr; - uint64_t new_rr; -}; - -union ia64_ipi_a{ - unsigned long val; - struct { - unsigned long rv : 3; - unsigned long ir : 1; - unsigned long eid : 8; - unsigned long id : 8; - unsigned long ib_base : 44; - }; -}; - -union ia64_ipi_d { - unsigned long val; - struct { - unsigned long vector : 8; - unsigned long dm : 3; - unsigned long ig : 53; - }; -}; - -/*ipi check exit data*/ -struct kvm_ipi_data{ - union ia64_ipi_a addr; - union ia64_ipi_d data; -}; - -/*global purge data*/ -struct kvm_ptc_g { - unsigned long vaddr; - unsigned long rr; - unsigned long ps; - struct kvm_vcpu *vcpu; -}; - -/*Exit control data */ -struct exit_ctl_data{ - uint32_t exit_reason; - uint32_t vm_status; - union { - struct kvm_mmio_req ioreq; - struct kvm_pal_call pal_data; - struct kvm_sal_call sal_data; - struct kvm_switch_rr6 rr_data; - struct kvm_ipi_data ipi_data; - struct kvm_ptc_g ptc_g_data; - } u; -}; - -union pte_flags { - unsigned long val; - struct { - unsigned long p : 1; /*0 */ - unsigned long : 1; /* 1 */ - unsigned long ma : 3; /* 2-4 */ - unsigned long a : 1; /* 5 */ - unsigned long d : 1; /* 6 */ - unsigned long pl : 2; /* 7-8 */ - unsigned long ar : 3; /* 9-11 */ - unsigned long ppn : 38; /* 12-49 */ - unsigned long : 2; /* 50-51 */ - unsigned long ed : 1; /* 52 */ - }; -}; - -union ia64_pta { - unsigned long val; - struct { - unsigned long ve : 1; - unsigned long reserved0 : 1; - unsigned long size : 6; - unsigned long vf : 1; - unsigned long reserved1 : 6; - unsigned long base : 49; - }; -}; - -struct thash_cb { - /* THASH base information */ - struct thash_data *hash; /* hash table pointer */ - union ia64_pta pta; - int num; -}; - -struct kvm_vcpu_stat { - u32 halt_wakeup; -}; - -struct kvm_vcpu_arch { - int launched; - int last_exit; - int last_run_cpu; - int vmm_tr_slot; - int vm_tr_slot; - int sn_rtc_tr_slot; - -#define KVM_MP_STATE_RUNNABLE 0 -#define KVM_MP_STATE_UNINITIALIZED 1 -#define 
KVM_MP_STATE_INIT_RECEIVED 2 -#define KVM_MP_STATE_HALTED 3 - int mp_state; - -#define MAX_PTC_G_NUM 3 - int ptc_g_count; - struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM]; - - /*halt timer to wake up sleepy vcpus*/ - struct hrtimer hlt_timer; - long ht_active; - - struct kvm_lapic *apic; /* kernel irqchip context */ - struct vpd *vpd; - - /* Exit data for vmm_transition*/ - struct exit_ctl_data exit_data; - - cpumask_t cache_coherent_map; - - unsigned long vmm_rr; - unsigned long host_rr6; - unsigned long psbits[8]; - unsigned long cr_iipa; - unsigned long cr_isr; - unsigned long vsa_base; - unsigned long dirty_log_lock_pa; - unsigned long __gp; - /* TR and TC. */ - struct thash_data itrs[NITRS]; - struct thash_data dtrs[NDTRS]; - /* Bit is set if there is a tr/tc for the region. */ - unsigned char itr_regions; - unsigned char dtr_regions; - unsigned char tc_regions; - /* purge all */ - unsigned long ptce_base; - unsigned long ptce_count[2]; - unsigned long ptce_stride[2]; - /* itc/itm */ - unsigned long last_itc; - long itc_offset; - unsigned long itc_check; - unsigned long timer_check; - unsigned int timer_pending; - unsigned int timer_fired; - - unsigned long vrr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long insvc[4]; /* Interrupt in service. */ - unsigned long xtp; - - unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_saved_rr0; /* from kvm_arch */ - unsigned long metaphysical_saved_rr4; /* from kvm_arch */ - unsigned long fp_psr; /*used for lazy float register */ - unsigned long saved_gp; - /*for phycial emulation */ - int mode_flags; - struct thash_cb vtlb; - struct thash_cb vhpt; - char irq_check; - char irq_new_pending; - - unsigned long opcode; - unsigned long cause; - char log_buf[VMM_LOG_LEN]; - union context host; - union context guest; - - char mmio_data[8]; -}; - -struct kvm_vm_stat { - u64 remote_tlb_flush; -}; - -struct kvm_sal_data { - unsigned long boot_ip; - unsigned long boot_gp; -}; - -struct kvm_arch_memory_slot { -}; - -struct kvm_arch { - spinlock_t dirty_log_lock; - - unsigned long vm_base; - unsigned long metaphysical_rr0; - unsigned long metaphysical_rr4; - unsigned long vmm_init_rr; - - int is_sn2; - - struct kvm_ioapic *vioapic; - struct kvm_vm_stat stat; - struct kvm_sal_data rdv_sal_data; - - struct list_head assigned_dev_head; - struct iommu_domain *iommu_domain; - bool iommu_noncoherent; - - unsigned long irq_sources_bitmap; - unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; -}; - -union cpuid3_t { - u64 value; - struct { - u64 number : 8; - u64 revision : 8; - u64 model : 8; - u64 family : 8; - u64 archrev : 8; - u64 rv : 24; - }; -}; - -struct kvm_pt_regs { - /* The following registers are saved by SAVE_MIN: */ - unsigned long b6; /* scratch */ - unsigned long b7; /* scratch */ - - unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ - unsigned long ar_ssd; /* reserved for future use (scratch) */ - - unsigned long r8; /* scratch (return value register 0) */ - unsigned long r9; /* scratch (return value register 1) */ - unsigned long r10; /* scratch (return value register 2) */ - unsigned long r11; /* scratch (return value register 3) */ - - unsigned long cr_ipsr; /* interrupted task's psr */ - unsigned long cr_iip; /* interrupted task's instruction pointer */ - unsigned long cr_ifs; /* interrupted task's function state */ - - unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ - unsigned long ar_pfs; /* 
prev function state */ - unsigned long ar_rsc; /* RSE configuration */ - /* The following two are valid only if cr_ipsr.cpl > 0: */ - unsigned long ar_rnat; /* RSE NaT */ - unsigned long ar_bspstore; /* RSE bspstore */ - - unsigned long pr; /* 64 predicate registers (1 bit each) */ - unsigned long b0; /* return pointer (bp) */ - unsigned long loadrs; /* size of dirty partition << 16 */ - - unsigned long r1; /* the gp pointer */ - unsigned long r12; /* interrupted task's memory stack pointer */ - unsigned long r13; /* thread pointer */ - - unsigned long ar_fpsr; /* floating point status (preserved) */ - unsigned long r15; /* scratch */ - - /* The remaining registers are NOT saved for system calls. */ - unsigned long r14; /* scratch */ - unsigned long r2; /* scratch */ - unsigned long r3; /* scratch */ - unsigned long r16; /* scratch */ - unsigned long r17; /* scratch */ - unsigned long r18; /* scratch */ - unsigned long r19; /* scratch */ - unsigned long r20; /* scratch */ - unsigned long r21; /* scratch */ - unsigned long r22; /* scratch */ - unsigned long r23; /* scratch */ - unsigned long r24; /* scratch */ - unsigned long r25; /* scratch */ - unsigned long r26; /* scratch */ - unsigned long r27; /* scratch */ - unsigned long r28; /* scratch */ - unsigned long r29; /* scratch */ - unsigned long r30; /* scratch */ - unsigned long r31; /* scratch */ - unsigned long ar_ccv; /* compare/exchange value (scratch) */ - - /* - * Floating point registers that the kernel considers scratch: - */ - struct ia64_fpreg f6; /* scratch */ - struct ia64_fpreg f7; /* scratch */ - struct ia64_fpreg f8; /* scratch */ - struct ia64_fpreg f9; /* scratch */ - struct ia64_fpreg f10; /* scratch */ - struct ia64_fpreg f11; /* scratch */ - - unsigned long r4; /* preserved */ - unsigned long r5; /* preserved */ - unsigned long r6; /* preserved */ - unsigned long r7; /* preserved */ - unsigned long eml_unat; /* used for emulating instruction */ - unsigned long pad0; /* alignment pad */ -}; - -static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) -{ - return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1; -} - -typedef int kvm_vmm_entry(void); -typedef void kvm_tramp_entry(union context *host, union context *guest); - -struct kvm_vmm_info{ - struct module *module; - kvm_vmm_entry *vmm_entry; - kvm_tramp_entry *tramp_entry; - unsigned long vmm_ivt; - unsigned long patch_mov_ar; - unsigned long patch_mov_ar_sn2; -}; - -int kvm_highest_pending_irq(struct kvm_vcpu *vcpu); -int kvm_emulate_halt(struct kvm_vcpu *vcpu); -int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); -void kvm_sal_emul(struct kvm_vcpu *vcpu); - -#define __KVM_HAVE_ARCH_VM_ALLOC 1 -struct kvm *kvm_arch_alloc_vm(void); -void kvm_arch_free_vm(struct kvm *kvm); - -static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_free_memslot(struct kvm *kvm, - struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} -static inline void kvm_arch_memslots_updated(struct kvm *kvm) {} -static inline void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, - enum kvm_mr_change change) {} -static inline void kvm_arch_hardware_unsetup(void) {} - -#endif /* __ASSEMBLY__*/ - -#endif diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 14aa1c58912b..0ec484d2dcbc 100644 
--- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -35,8 +35,8 @@ extern void *per_cpu_init(void); /* * Be extremely careful when taking the address of this variable! Due to virtual - * remapping, it is different from the canonical address returned by __get_cpu_var(var)! - * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly + * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)! + * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly * more efficient. */ #define __ia64_per_cpu_var(var) (*({ \ diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h deleted file mode 100644 index 42b233bedeb5..000000000000 --- a/arch/ia64/include/asm/pvclock-abi.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * same structure to x86's - * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic. - * For now, define same duplicated definitions. - */ - -#ifndef _ASM_IA64__PVCLOCK_ABI_H -#define _ASM_IA64__PVCLOCK_ABI_H -#ifndef __ASSEMBLY__ - -/* - * These structs MUST NOT be changed. - * They are the ABI between hypervisor and guest OS. - * KVM is using this. - * - * pvclock_vcpu_time_info holds the system time and the tsc timestamp - * of the last update. So the guest can use the tsc delta to get a - * more precise system time. There is one per virtual cpu. - * - * pvclock_wall_clock references the point in time when the system - * time was zero (usually boot time), thus the guest calculates the - * current wall clock by adding the system time. - * - * Protocol for the "version" fields is: hypervisor raises it (making - * it uneven) before it starts updating the fields and raises it again - * (making it even) when it is done. Thus the guest can make sure the - * time values it got are consistent by checking the version before - * and after reading them. - */ - -struct pvclock_vcpu_time_info { - u32 version; - u32 pad0; - u64 tsc_timestamp; - u64 system_time; - u32 tsc_to_system_mul; - s8 tsc_shift; - u8 pad[3]; -} __attribute__((__packed__)); /* 32 bytes */ - -struct pvclock_wall_clock { - u32 version; - u32 sec; - u32 nsec; -} __attribute__((__packed__)); - -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_IA64__PVCLOCK_ABI_H */ diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h deleted file mode 100644 index 99503c284400..000000000000 --- a/arch/ia64/include/uapi/asm/kvm.h +++ /dev/null @@ -1,268 +0,0 @@ -#ifndef __ASM_IA64_KVM_H -#define __ASM_IA64_KVM_H - -/* - * kvm structure definitions for ia64 - * - * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- * - */ - -#include <linux/types.h> -#include <linux/ioctl.h> - -/* Select x86 specific features in <linux/kvm.h> */ -#define __KVM_HAVE_IOAPIC -#define __KVM_HAVE_IRQ_LINE - -/* Architectural interrupt line count. */ -#define KVM_NR_INTERRUPTS 256 - -#define KVM_IOAPIC_NUM_PINS 48 - -struct kvm_ioapic_state { - __u64 base_address; - __u32 ioregsel; - __u32 id; - __u32 irr; - __u32 pad; - union { - __u64 bits; - struct { - __u8 vector; - __u8 delivery_mode:3; - __u8 dest_mode:1; - __u8 delivery_status:1; - __u8 polarity:1; - __u8 remote_irr:1; - __u8 trig_mode:1; - __u8 mask:1; - __u8 reserve:7; - __u8 reserved[4]; - __u8 dest_id; - } fields; - } redirtbl[KVM_IOAPIC_NUM_PINS]; -}; - -#define KVM_IRQCHIP_PIC_MASTER 0 -#define KVM_IRQCHIP_PIC_SLAVE 1 -#define KVM_IRQCHIP_IOAPIC 2 -#define KVM_NR_IRQCHIPS 3 - -#define KVM_CONTEXT_SIZE 8*1024 - -struct kvm_fpreg { - union { - unsigned long bits[2]; - long double __dummy; /* force 16-byte alignment */ - } u; -}; - -union context { - /* 8K size */ - char dummy[KVM_CONTEXT_SIZE]; - struct { - unsigned long psr; - unsigned long pr; - unsigned long caller_unat; - unsigned long pad; - unsigned long gr[32]; - unsigned long ar[128]; - unsigned long br[8]; - unsigned long cr[128]; - unsigned long rr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long pkr[8]; - struct kvm_fpreg fr[128]; - }; -}; - -struct thash_data { - union { - struct { - unsigned long p : 1; /* 0 */ - unsigned long rv1 : 1; /* 1 */ - unsigned long ma : 3; /* 2-4 */ - unsigned long a : 1; /* 5 */ - unsigned long d : 1; /* 6 */ - unsigned long pl : 2; /* 7-8 */ - unsigned long ar : 3; /* 9-11 */ - unsigned long ppn : 38; /* 12-49 */ - unsigned long rv2 : 2; /* 50-51 */ - unsigned long ed : 1; /* 52 */ - unsigned long ig1 : 11; /* 53-63 */ - }; - struct { - unsigned long __rv1 : 53; /* 0-52 */ - unsigned long contiguous : 1; /*53 */ - unsigned long tc : 1; /* 54 TR or TC */ - unsigned long cl : 1; - /* 55 I side or D side cache line */ - unsigned long len : 4; /* 56-59 */ - unsigned long io : 1; /* 60 entry is for io or not */ - unsigned long nomap : 1; - /* 61 entry cann't be inserted into machine TLB.*/ - unsigned long checked : 1; - /* 62 for VTLB/VHPT sanity check */ - unsigned long invalid : 1; - /* 63 invalid entry */ - }; - unsigned long page_flags; - }; /* same for VHPT and TLB */ - - union { - struct { - unsigned long rv3 : 2; - unsigned long ps : 6; - unsigned long key : 24; - unsigned long rv4 : 32; - }; - unsigned long itir; - }; - union { - struct { - unsigned long ig2 : 12; - unsigned long vpn : 49; - unsigned long vrn : 3; - }; - unsigned long ifa; - unsigned long vadr; - struct { - unsigned long tag : 63; - unsigned long ti : 1; - }; - unsigned long etag; - }; - union { - struct thash_data *next; - unsigned long rid; - unsigned long gpaddr; - }; -}; - -#define NITRS 8 -#define NDTRS 8 - -struct saved_vpd { - unsigned long vhpi; - unsigned long vgr[16]; - unsigned long vbgr[16]; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long vpsr; - unsigned long vpr; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; - unsigned long rsv2[7]; - unsigned long ipsr; - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; - unsigned long ivr; - 
unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; - unsigned long lrr1; - unsigned long rsv6[46]; - }; - }; -}; - -struct kvm_regs { - struct saved_vpd vpd; - /*Arch-regs*/ - int mp_state; - unsigned long vmm_rr; - /* TR and TC. */ - struct thash_data itrs[NITRS]; - struct thash_data dtrs[NDTRS]; - /* Bit is set if there is a tr/tc for the region. */ - unsigned char itr_regions; - unsigned char dtr_regions; - unsigned char tc_regions; - - char irq_check; - unsigned long saved_itc; - unsigned long itc_check; - unsigned long timer_check; - unsigned long timer_pending; - unsigned long last_itc; - - unsigned long vrr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long insvc[4]; /* Interrupt in service. */ - unsigned long xtp; - - unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_saved_rr0; /* from kvm_arch */ - unsigned long metaphysical_saved_rr4; /* from kvm_arch */ - unsigned long fp_psr; /*used for lazy float register */ - unsigned long saved_gp; - /*for phycial emulation */ - - union context saved_guest; - - unsigned long reserved[64]; /* for future use */ -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -#define KVM_IA64_VCPU_STACK_SHIFT 16 -#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT) - -struct kvm_ia64_vcpu_stack { - unsigned char stack[KVM_IA64_VCPU_STACK_SIZE]; -}; - -struct kvm_debug_exit_arch { -}; - -/* for KVM_SET_GUEST_DEBUG */ -struct kvm_guest_debug_arch { -}; - -/* definition of registers in kvm_run */ -struct kvm_sync_regs { -}; - -#endif diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index dc063fe6646a..5f4243f0acfa 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2145,22 +2145,12 @@ doit: return 0; } -static int -pfm_no_open(struct inode *irrelevant, struct file *dontcare) -{ - DPRINT(("pfm_no_open called\n")); - return -ENXIO; -} - - - static const struct file_operations pfm_file_ops = { .llseek = no_llseek, .read = pfm_read, .write = pfm_write, .poll = pfm_poll, .unlocked_ioctl = pfm_ioctl, - .open = pfm_no_open, /* special open code to disallow open via /proc */ .fasync = pfm_fasync, .release = pfm_close, .flush = pfm_flush diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig deleted file mode 100644 index 3d50ea955c4c..000000000000 --- a/arch/ia64/kvm/Kconfig +++ /dev/null @@ -1,66 +0,0 @@ -# -# KVM configuration -# - -source "virt/kvm/Kconfig" - -menuconfig VIRTUALIZATION - bool "Virtualization" - depends on HAVE_KVM || IA64 - default y - ---help--- - Say Y here to get to see options for using your Linux host to run other - operating systems inside virtual machines (guests). - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and disabled. - -if VIRTUALIZATION - -config KVM - tristate "Kernel-based Virtual Machine (KVM) support" - depends on BROKEN - depends on HAVE_KVM && MODULES - depends on BROKEN - select PREEMPT_NOTIFIERS - select ANON_INODES - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQ_ROUTING - select KVM_APIC_ARCHITECTURE - select KVM_MMIO - ---help--- - Support hosting fully virtualized guest machines using hardware - virtualization extensions. You will need a fairly recent - processor equipped with virtualization extensions. 
You will also - need to select one or more of the processor modules below. - - This module provides access to the hardware capabilities through - a character device node named /dev/kvm. - - To compile this as a module, choose M here: the module - will be called kvm. - - If unsure, say N. - -config KVM_INTEL - tristate "KVM for Intel Itanium 2 processors support" - depends on KVM && m - ---help--- - Provides support for KVM on Itanium 2 processors equipped with the VT - extensions. - -config KVM_DEVICE_ASSIGNMENT - bool "KVM legacy PCI device assignment support" - depends on KVM && PCI && IOMMU_API - default y - ---help--- - Provide support for legacy PCI device assignment through KVM. The - kernel now also supports a full featured userspace device driver - framework through VFIO, which supersedes much of this support. - - If unsure, say Y. - -source drivers/vhost/Kconfig - -endif # VIRTUALIZATION diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile deleted file mode 100644 index 18e45ec49bbf..000000000000 --- a/arch/ia64/kvm/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -#This Make file is to generate asm-offsets.h and build source. -# - -#Generate asm-offsets.h for vmm module build -offsets-file := asm-offsets.h - -always := $(offsets-file) -targets := $(offsets-file) -targets += arch/ia64/kvm/asm-offsets.s - -# Default sed regexp - multiline due to syntax constraints -define sed-y - "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}" -endef - -quiet_cmd_offsets = GEN $@ -define cmd_offsets - (set -e; \ - echo "#ifndef __ASM_KVM_OFFSETS_H__"; \ - echo "#define __ASM_KVM_OFFSETS_H__"; \ - echo "/*"; \ - echo " * DO NOT MODIFY."; \ - echo " *"; \ - echo " * This file was generated by Makefile"; \ - echo " *"; \ - echo " */"; \ - echo ""; \ - sed -ne $(sed-y) $<; \ - echo ""; \ - echo "#endif" ) > $@ -endef - -# We use internal rules to avoid the "is up to date" message from make -arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \ - $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\ - $(wildcard $(srctree)/include/linux/*.h) - $(call if_changed_dep,cc_s_c) - -$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s - $(call cmd,offsets) - -FORCE : $(obj)/$(offsets-file) - -# -# Makefile for Kernel-based Virtual Machine module -# - -ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ -asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ -KVM := ../../../virt/kvm - -common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \ - $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o - -ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y) -common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o -endif - -kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o -obj-$(CONFIG_KVM) += kvm.o - -CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 -kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ - vtlb.o process.o kvm_lib.o -#Add link memcpy and memset to avoid possible structure assignment error -kvm-intel-objs += memcpy.o memset.o -obj-$(CONFIG_KVM_INTEL) += kvm-intel.o diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c deleted file mode 100644 index 9324c875caf5..000000000000 --- a/arch/ia64/kvm/asm-offsets.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * asm-offsets.c Generate definitions needed by assembly language modules. - * This code generates raw asm output which is post-processed - * to extract and format the required data. - * - * Anthony Xu <anthony.xu@intel.com> - * Xiantao Zhang <xiantao.zhang@intel.com> - * Copyright (c) 2007 Intel Corporation KVM support. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include <linux/kvm_host.h> -#include <linux/kbuild.h> - -#include "vcpu.h" - -void foo(void) -{ - DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); - DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs)); - - BLANK(); - - DEFINE(VMM_VCPU_META_RR0_OFFSET, - offsetof(struct kvm_vcpu, arch.metaphysical_rr0)); - DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, - offsetof(struct kvm_vcpu, - arch.metaphysical_saved_rr0)); - DEFINE(VMM_VCPU_VRR0_OFFSET, - offsetof(struct kvm_vcpu, arch.vrr[0])); - DEFINE(VMM_VPD_IRR0_OFFSET, - offsetof(struct vpd, irr[0])); - DEFINE(VMM_VCPU_ITC_CHECK_OFFSET, - offsetof(struct kvm_vcpu, arch.itc_check)); - DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET, - offsetof(struct kvm_vcpu, arch.irq_check)); - DEFINE(VMM_VPD_VHPI_OFFSET, - offsetof(struct vpd, vhpi)); - DEFINE(VMM_VCPU_VSA_BASE_OFFSET, - offsetof(struct kvm_vcpu, arch.vsa_base)); - DEFINE(VMM_VCPU_VPD_OFFSET, - offsetof(struct kvm_vcpu, arch.vpd)); - DEFINE(VMM_VCPU_IRQ_CHECK, - offsetof(struct kvm_vcpu, arch.irq_check)); - DEFINE(VMM_VCPU_TIMER_PENDING, - offsetof(struct kvm_vcpu, arch.timer_pending)); - DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, - offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0)); - DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, - offsetof(struct kvm_vcpu, arch.mode_flags)); - DEFINE(VMM_VCPU_ITC_OFS_OFFSET, - offsetof(struct kvm_vcpu, arch.itc_offset)); - DEFINE(VMM_VCPU_LAST_ITC_OFFSET, - offsetof(struct kvm_vcpu, arch.last_itc)); - DEFINE(VMM_VCPU_SAVED_GP_OFFSET, - offsetof(struct kvm_vcpu, arch.saved_gp)); - - BLANK(); - - DEFINE(VMM_PT_REGS_B6_OFFSET, - offsetof(struct kvm_pt_regs, b6)); - DEFINE(VMM_PT_REGS_B7_OFFSET, - offsetof(struct kvm_pt_regs, b7)); - DEFINE(VMM_PT_REGS_AR_CSD_OFFSET, - offsetof(struct kvm_pt_regs, ar_csd)); - DEFINE(VMM_PT_REGS_AR_SSD_OFFSET, - offsetof(struct kvm_pt_regs, ar_ssd)); - DEFINE(VMM_PT_REGS_R8_OFFSET, - offsetof(struct kvm_pt_regs, r8)); - DEFINE(VMM_PT_REGS_R9_OFFSET, - offsetof(struct kvm_pt_regs, r9)); - DEFINE(VMM_PT_REGS_R10_OFFSET, - offsetof(struct kvm_pt_regs, r10)); - DEFINE(VMM_PT_REGS_R11_OFFSET, - offsetof(struct kvm_pt_regs, r11)); - DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET, - offsetof(struct kvm_pt_regs, cr_ipsr)); - DEFINE(VMM_PT_REGS_CR_IIP_OFFSET, - offsetof(struct kvm_pt_regs, cr_iip)); - DEFINE(VMM_PT_REGS_CR_IFS_OFFSET, - offsetof(struct kvm_pt_regs, cr_ifs)); - DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET, - offsetof(struct kvm_pt_regs, ar_unat)); - DEFINE(VMM_PT_REGS_AR_PFS_OFFSET, - offsetof(struct kvm_pt_regs, ar_pfs)); - DEFINE(VMM_PT_REGS_AR_RSC_OFFSET, - offsetof(struct kvm_pt_regs, ar_rsc)); - DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET, - offsetof(struct kvm_pt_regs, ar_rnat)); - - DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET, - offsetof(struct kvm_pt_regs, ar_bspstore)); - DEFINE(VMM_PT_REGS_PR_OFFSET, - offsetof(struct kvm_pt_regs, pr)); - DEFINE(VMM_PT_REGS_B0_OFFSET, - offsetof(struct kvm_pt_regs, b0)); - 
DEFINE(VMM_PT_REGS_LOADRS_OFFSET, - offsetof(struct kvm_pt_regs, loadrs)); - DEFINE(VMM_PT_REGS_R1_OFFSET, - offsetof(struct kvm_pt_regs, r1)); - DEFINE(VMM_PT_REGS_R12_OFFSET, - offsetof(struct kvm_pt_regs, r12)); - DEFINE(VMM_PT_REGS_R13_OFFSET, - offsetof(struct kvm_pt_regs, r13)); - DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET, - offsetof(struct kvm_pt_regs, ar_fpsr)); - DEFINE(VMM_PT_REGS_R15_OFFSET, - offsetof(struct kvm_pt_regs, r15)); - DEFINE(VMM_PT_REGS_R14_OFFSET, - offsetof(struct kvm_pt_regs, r14)); - DEFINE(VMM_PT_REGS_R2_OFFSET, - offsetof(struct kvm_pt_regs, r2)); - DEFINE(VMM_PT_REGS_R3_OFFSET, - offsetof(struct kvm_pt_regs, r3)); - DEFINE(VMM_PT_REGS_R16_OFFSET, - offsetof(struct kvm_pt_regs, r16)); - DEFINE(VMM_PT_REGS_R17_OFFSET, - offsetof(struct kvm_pt_regs, r17)); - DEFINE(VMM_PT_REGS_R18_OFFSET, - offsetof(struct kvm_pt_regs, r18)); - DEFINE(VMM_PT_REGS_R19_OFFSET, - offsetof(struct kvm_pt_regs, r19)); - DEFINE(VMM_PT_REGS_R20_OFFSET, - offsetof(struct kvm_pt_regs, r20)); - DEFINE(VMM_PT_REGS_R21_OFFSET, - offsetof(struct kvm_pt_regs, r21)); - DEFINE(VMM_PT_REGS_R22_OFFSET, - offsetof(struct kvm_pt_regs, r22)); - DEFINE(VMM_PT_REGS_R23_OFFSET, - offsetof(struct kvm_pt_regs, r23)); - DEFINE(VMM_PT_REGS_R24_OFFSET, - offsetof(struct kvm_pt_regs, r24)); - DEFINE(VMM_PT_REGS_R25_OFFSET, - offsetof(struct kvm_pt_regs, r25)); - DEFINE(VMM_PT_REGS_R26_OFFSET, - offsetof(struct kvm_pt_regs, r26)); - DEFINE(VMM_PT_REGS_R27_OFFSET, - offsetof(struct kvm_pt_regs, r27)); - DEFINE(VMM_PT_REGS_R28_OFFSET, - offsetof(struct kvm_pt_regs, r28)); - DEFINE(VMM_PT_REGS_R29_OFFSET, - offsetof(struct kvm_pt_regs, r29)); - DEFINE(VMM_PT_REGS_R30_OFFSET, - offsetof(struct kvm_pt_regs, r30)); - DEFINE(VMM_PT_REGS_R31_OFFSET, - offsetof(struct kvm_pt_regs, r31)); - DEFINE(VMM_PT_REGS_AR_CCV_OFFSET, - offsetof(struct kvm_pt_regs, ar_ccv)); - DEFINE(VMM_PT_REGS_F6_OFFSET, - offsetof(struct kvm_pt_regs, f6)); - DEFINE(VMM_PT_REGS_F7_OFFSET, - offsetof(struct kvm_pt_regs, f7)); - DEFINE(VMM_PT_REGS_F8_OFFSET, - offsetof(struct kvm_pt_regs, f8)); - DEFINE(VMM_PT_REGS_F9_OFFSET, - offsetof(struct kvm_pt_regs, f9)); - DEFINE(VMM_PT_REGS_F10_OFFSET, - offsetof(struct kvm_pt_regs, f10)); - DEFINE(VMM_PT_REGS_F11_OFFSET, - offsetof(struct kvm_pt_regs, f11)); - DEFINE(VMM_PT_REGS_R4_OFFSET, - offsetof(struct kvm_pt_regs, r4)); - DEFINE(VMM_PT_REGS_R5_OFFSET, - offsetof(struct kvm_pt_regs, r5)); - DEFINE(VMM_PT_REGS_R6_OFFSET, - offsetof(struct kvm_pt_regs, r6)); - DEFINE(VMM_PT_REGS_R7_OFFSET, - offsetof(struct kvm_pt_regs, r7)); - DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET, - offsetof(struct kvm_pt_regs, eml_unat)); - DEFINE(VMM_VCPU_IIPA_OFFSET, - offsetof(struct kvm_vcpu, arch.cr_iipa)); - DEFINE(VMM_VCPU_OPCODE_OFFSET, - offsetof(struct kvm_vcpu, arch.opcode)); - DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause)); - DEFINE(VMM_VCPU_ISR_OFFSET, - offsetof(struct kvm_vcpu, arch.cr_isr)); - DEFINE(VMM_PT_REGS_R16_SLOT, - (((offsetof(struct kvm_pt_regs, r16) - - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f)); - DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, - offsetof(struct kvm_vcpu, arch.mode_flags)); - DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp)); - BLANK(); - - DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd)); - DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs)); - DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET, - offsetof(struct kvm_vcpu, arch.insvc[0])); - DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta)); - DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr)); - - 
DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4])); - DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5])); - DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12])); - DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13])); - DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0])); - DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1])); - DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0])); - DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1])); - DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2])); - DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0])); - DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16])); - DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18])); - DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19])); - DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21])); - DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24])); - DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27])); - DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28])); - DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29])); - DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30])); - DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36])); - DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40])); - DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64])); - DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65])); - DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0])); - DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2])); - DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8])); - DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0])); - DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0])); - DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2])); - DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3])); - DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32])); - DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33])); - DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0])); - DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr)); - BLANK(); -} diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h deleted file mode 100644 index c0785a728271..000000000000 --- a/arch/ia64/kvm/irq.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * irq.h: In-kernel interrupt controller related definitions - * Copyright (c) 2008, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- * - * Authors: - * Xiantao Zhang <xiantao.zhang@intel.com> - * - */ - -#ifndef __IRQ_H -#define __IRQ_H - -#include "lapic.h" - -static inline int irqchip_in_kernel(struct kvm *kvm) -{ - return 1; -} - -#endif diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c deleted file mode 100644 index dbe46f43884d..000000000000 --- a/arch/ia64/kvm/kvm-ia64.c +++ /dev/null @@ -1,1942 +0,0 @@ -/* - * kvm_ia64.c: Basic KVM support On Itanium series processors - * - * - * Copyright (C) 2007, Intel Corporation. - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/percpu.h> -#include <linux/fs.h> -#include <linux/slab.h> -#include <linux/smp.h> -#include <linux/kvm_host.h> -#include <linux/kvm.h> -#include <linux/bitops.h> -#include <linux/hrtimer.h> -#include <linux/uaccess.h> -#include <linux/iommu.h> -#include <linux/intel-iommu.h> -#include <linux/pci.h> - -#include <asm/pgtable.h> -#include <asm/gcc_intrin.h> -#include <asm/pal.h> -#include <asm/cacheflush.h> -#include <asm/div64.h> -#include <asm/tlb.h> -#include <asm/elf.h> -#include <asm/sn/addrs.h> -#include <asm/sn/clksupport.h> -#include <asm/sn/shub_mmr.h> - -#include "misc.h" -#include "vti.h" -#include "iodev.h" -#include "ioapic.h" -#include "lapic.h" -#include "irq.h" - -static unsigned long kvm_vmm_base; -static unsigned long kvm_vsa_base; -static unsigned long kvm_vm_buffer; -static unsigned long kvm_vm_buffer_size; -unsigned long kvm_vmm_gp; - -static long vp_env_info; - -static struct kvm_vmm_info *kvm_vmm_info; - -static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); - -struct kvm_stats_debugfs_item debugfs_entries[] = { - { NULL } -}; - -static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) -{ -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (vcpu->kvm->arch.is_sn2) - return rtc_time(); - else -#endif - return ia64_getreg(_IA64_REG_AR_ITC); -} - -static void kvm_flush_icache(unsigned long start, unsigned long len) -{ - int l; - - for (l = 0; l < (len + 32); l += 32) - ia64_fc((void *)(start + l)); - - ia64_sync_i(); - ia64_srlz_i(); -} - -static void kvm_flush_tlb_all(void) -{ - unsigned long i, j, count0, count1, stride0, stride1, addr; - long flags; - - addr = local_cpu_data->ptce_base; - count0 = local_cpu_data->ptce_count[0]; - count1 = local_cpu_data->ptce_count[1]; - stride0 = local_cpu_data->ptce_stride[0]; - stride1 = local_cpu_data->ptce_stride[1]; - - local_irq_save(flags); - for (i = 0; i < count0; ++i) { - for (j = 0; j < count1; ++j) { - ia64_ptce(addr); - addr += stride1; - } - addr += stride0; - } - local_irq_restore(flags); - ia64_srlz_i(); /* srlz.i implies srlz.d */ -} - -long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, - 
(u64)opt_handler); - - return iprv.status; -} - -static DEFINE_SPINLOCK(vp_lock); - -int kvm_arch_hardware_enable(void) -{ - long status; - long tmp_base; - unsigned long pte; - unsigned long saved_psr; - int slot; - - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); - local_irq_save(saved_psr); - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - local_irq_restore(saved_psr); - if (slot < 0) - return -EINVAL; - - spin_lock(&vp_lock); - status = ia64_pal_vp_init_env(kvm_vsa_base ? - VP_INIT_ENV : VP_INIT_ENV_INITALIZE, - __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); - if (status != 0) { - spin_unlock(&vp_lock); - printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); - return -EINVAL; - } - - if (!kvm_vsa_base) { - kvm_vsa_base = tmp_base; - printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); - } - spin_unlock(&vp_lock); - ia64_ptr_entry(0x3, slot); - - return 0; -} - -void kvm_arch_hardware_disable(void) -{ - - long status; - int slot; - unsigned long pte; - unsigned long saved_psr; - unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); - - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), - PAGE_KERNEL)); - - local_irq_save(saved_psr); - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - local_irq_restore(saved_psr); - if (slot < 0) - return; - - status = ia64_pal_vp_exit_env(host_iva); - if (status) - printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", - status); - ia64_ptr_entry(0x3, slot); -} - -void kvm_arch_check_processor_compat(void *rtn) -{ - *(int *)rtn = 0; -} - -int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) -{ - - int r; - - switch (ext) { - case KVM_CAP_IRQCHIP: - case KVM_CAP_MP_STATE: - case KVM_CAP_IRQ_INJECT_STATUS: - case KVM_CAP_IOAPIC_POLARITY_IGNORED: - r = 1; - break; - case KVM_CAP_COALESCED_MMIO: - r = KVM_COALESCED_MMIO_PAGE_OFFSET; - break; -#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT - case KVM_CAP_IOMMU: - r = iommu_present(&pci_bus_type); - break; -#endif - default: - r = 0; - } - return r; - -} - -static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 1; - return 0; -} - -static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct kvm_mmio_req *p; - struct kvm_io_device *mmio_dev; - int r; - - p = kvm_get_vcpu_ioreq(vcpu); - - if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) - goto mmio; - vcpu->mmio_needed = 1; - vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr; - vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size; - vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; - - if (vcpu->mmio_is_write) - memcpy(vcpu->arch.mmio_data, &p->data, p->size); - memcpy(kvm_run->mmio.data, &p->data, p->size); - kvm_run->exit_reason = KVM_EXIT_MMIO; - return 0; -mmio: - if (p->dir) - r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, - p->size, &p->data); - else - r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, - p->size, &p->data); - if (r) - printk(KERN_ERR"kvm: No iodevice found! 
addr:%lx\n", p->addr); - p->state = STATE_IORESP_READY; - - return 1; -} - -static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_PAL_CALL) - return kvm_pal_emul(vcpu, kvm_run); - else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 2; - return 0; - } -} - -static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - kvm_sal_emul(vcpu); - return 1; - } else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 3; - return 0; - } - -} - -static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (!test_and_set_bit(vector, &vpd->irr[0])) { - vcpu->arch.irq_new_pending = 1; - kvm_vcpu_kick(vcpu); - return 1; - } - return 0; -} - -/* - * offset: address offset to IPI space. - * value: deliver value. - */ -static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, - uint64_t vector) -{ - switch (dm) { - case SAPIC_FIXED: - break; - case SAPIC_NMI: - vector = 2; - break; - case SAPIC_EXTINT: - vector = 0; - break; - case SAPIC_INIT: - case SAPIC_PMI: - default: - printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); - return; - } - __apic_accept_irq(vcpu, vector); -} - -static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, - unsigned long eid) -{ - union ia64_lid lid; - int i; - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) { - lid.val = VCPU_LID(vcpu); - if (lid.id == id && lid.eid == eid) - return vcpu; - } - - return NULL; -} - -static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p = kvm_get_exit_data(vcpu); - struct kvm_vcpu *target_vcpu; - struct kvm_pt_regs *regs; - union ia64_ipi_a addr = p->u.ipi_data.addr; - union ia64_ipi_d data = p->u.ipi_data.data; - - target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); - if (!target_vcpu) - return handle_vm_error(vcpu, kvm_run); - - if (!target_vcpu->arch.launched) { - regs = vcpu_regs(target_vcpu); - - regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; - regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; - - target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - if (waitqueue_active(&target_vcpu->wq)) - wake_up_interruptible(&target_vcpu->wq); - } else { - vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); - if (target_vcpu != vcpu) - kvm_vcpu_kick(target_vcpu); - } - - return 1; -} - -struct call_data { - struct kvm_ptc_g ptc_g_data; - struct kvm_vcpu *vcpu; -}; - -static void vcpu_global_purge(void *info) -{ - struct call_data *p = (struct call_data *)info; - struct kvm_vcpu *vcpu = p->vcpu; - - if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) - return; - - set_bit(KVM_REQ_PTC_G, &vcpu->requests); - if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { - vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = - p->ptc_g_data; - } else { - clear_bit(KVM_REQ_PTC_G, &vcpu->requests); - vcpu->arch.ptc_g_count = 0; - set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); - } -} - -static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p = kvm_get_exit_data(vcpu); - struct kvm *kvm = vcpu->kvm; - struct call_data call_data; - int i; - struct kvm_vcpu *vcpui; - - call_data.ptc_g_data = p->u.ptc_g_data; - - kvm_for_each_vcpu(i, vcpui, kvm) { - if 
(vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || - vcpu == vcpui) - continue; - - if (waitqueue_active(&vcpui->wq)) - wake_up_interruptible(&vcpui->wq); - - if (vcpui->cpu != -1) { - call_data.vcpu = vcpui; - smp_call_function_single(vcpui->cpu, - vcpu_global_purge, &call_data, 1); - } else - printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); - - } - return 1; -} - -static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - return 1; -} - -static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) -{ - unsigned long pte, rtc_phys_addr, map_addr; - int slot; - - map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); - rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; - pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); - slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); - vcpu->arch.sn_rtc_tr_slot = slot; - if (slot < 0) { - printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n"); - slot = 0; - } - return slot; -} - -int kvm_emulate_halt(struct kvm_vcpu *vcpu) -{ - - ktime_t kt; - long itc_diff; - unsigned long vcpu_now_itc; - unsigned long expires; - struct hrtimer *p_ht = &vcpu->arch.hlt_timer; - unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (irqchip_in_kernel(vcpu->kvm)) { - - vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; - - if (time_after(vcpu_now_itc, vpd->itm)) { - vcpu->arch.timer_check = 1; - return 1; - } - itc_diff = vpd->itm - vcpu_now_itc; - if (itc_diff < 0) - itc_diff = -itc_diff; - - expires = div64_u64(itc_diff, cyc_per_usec); - kt = ktime_set(0, 1000 * expires); - - vcpu->arch.ht_active = 1; - hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); - - vcpu->arch.mp_state = KVM_MP_STATE_HALTED; - kvm_vcpu_block(vcpu); - hrtimer_cancel(p_ht); - vcpu->arch.ht_active = 0; - - if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || - kvm_cpu_has_pending_timer(vcpu)) - if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - - if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) - return -EINTR; - return 1; - } else { - printk(KERN_ERR"kvm: Unsupported userspace halt!"); - return 0; - } -} - -static int handle_vm_shutdown(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; - return 0; -} - -static int handle_external_interrupt(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - return 1; -} - -static int handle_vcpu_debug(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - printk("VMM: %s", vcpu->arch.log_buf); - return 1; -} - -static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) = { - [EXIT_REASON_VM_PANIC] = handle_vm_error, - [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, - [EXIT_REASON_PAL_CALL] = handle_pal_call, - [EXIT_REASON_SAL_CALL] = handle_sal_call, - [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, - [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, - [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, - [EXIT_REASON_IPI] = handle_ipi, - [EXIT_REASON_PTC_G] = handle_global_purge, - [EXIT_REASON_DEBUG] = handle_vcpu_debug, - -}; - -static const int kvm_vti_max_exit_handlers = - sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); - -static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p_exit_data; - - p_exit_data = kvm_get_exit_data(vcpu); - return p_exit_data->exit_reason; -} - -/* - * The guest has exited. See if we can fix it or if we need userspace - * assistance. 
- */ -static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) -{ - u32 exit_reason = kvm_get_exit_reason(vcpu); - vcpu->arch.last_exit = exit_reason; - - if (exit_reason < kvm_vti_max_exit_handlers - && kvm_vti_exit_handlers[exit_reason]) - return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); - else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = exit_reason; - } - return 0; -} - -static inline void vti_set_rr6(unsigned long rr6) -{ - ia64_set_rr(RR6, rr6); - ia64_srlz_i(); -} - -static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) -{ - unsigned long pte; - struct kvm *kvm = vcpu->kvm; - int r; - - /*Insert a pair of tr to map vmm*/ - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); - r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - if (r < 0) - goto out; - vcpu->arch.vmm_tr_slot = r; - /*Insert a pairt of tr to map data of vm*/ - pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); - r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, - pte, KVM_VM_DATA_SHIFT); - if (r < 0) - goto out; - vcpu->arch.vm_tr_slot = r; - -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (kvm->arch.is_sn2) { - r = kvm_sn2_setup_mappings(vcpu); - if (r < 0) - goto out; - } -#endif - - r = 0; -out: - return r; -} - -static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = vcpu->kvm; - ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); - ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (kvm->arch.is_sn2) - ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); -#endif -} - -static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - int r; - int cpu = smp_processor_id(); - - if (vcpu->arch.last_run_cpu != cpu || - per_cpu(last_vcpu, cpu) != vcpu) { - per_cpu(last_vcpu, cpu) = vcpu; - vcpu->arch.last_run_cpu = cpu; - kvm_flush_tlb_all(); - } - - vcpu->arch.host_rr6 = ia64_get_rr(RR6); - vti_set_rr6(vcpu->arch.vmm_rr); - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - return r; -} - -static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) -{ - kvm_purge_vmm_mapping(vcpu); - vti_set_rr6(vcpu->arch.host_rr6); -} - -static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - union context *host_ctx, *guest_ctx; - int r, idx; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - -again: - if (signal_pending(current)) { - r = -EINTR; - kvm_run->exit_reason = KVM_EXIT_INTR; - goto out; - } - - preempt_disable(); - local_irq_disable(); - - /*Get host and guest context with guest address space.*/ - host_ctx = kvm_get_host_context(vcpu); - guest_ctx = kvm_get_guest_context(vcpu); - - clear_bit(KVM_REQ_KICK, &vcpu->requests); - - r = kvm_vcpu_pre_transition(vcpu); - if (r < 0) - goto vcpu_run_fail; - - srcu_read_unlock(&vcpu->kvm->srcu, idx); - vcpu->mode = IN_GUEST_MODE; - kvm_guest_enter(); - - /* - * Transition to the guest - */ - kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); - - kvm_vcpu_post_transition(vcpu); - - vcpu->arch.launched = 1; - set_bit(KVM_REQ_KICK, &vcpu->requests); - local_irq_enable(); - - /* - * We must have an instruction between local_irq_enable() and - * kvm_guest_exit(), so the timer interrupt isn't delayed by - * the interrupt shadow. The stat.exits increment will do nicely. 
- * But we need to prevent reordering, hence this barrier(): - */ - barrier(); - kvm_guest_exit(); - vcpu->mode = OUTSIDE_GUEST_MODE; - preempt_enable(); - - idx = srcu_read_lock(&vcpu->kvm->srcu); - - r = kvm_handle_exit(kvm_run, vcpu); - - if (r > 0) { - if (!need_resched()) - goto again; - } - -out: - srcu_read_unlock(&vcpu->kvm->srcu, idx); - if (r > 0) { - cond_resched(); - idx = srcu_read_lock(&vcpu->kvm->srcu); - goto again; - } - - return r; - -vcpu_run_fail: - local_irq_enable(); - preempt_enable(); - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; - goto out; -} - -static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) -{ - struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); - - if (!vcpu->mmio_is_write) - memcpy(&p->data, vcpu->arch.mmio_data, 8); - p->state = STATE_IORESP_READY; -} - -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - int r; - sigset_t sigsaved; - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); - - if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { - kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); - r = -EAGAIN; - goto out; - } - - if (vcpu->mmio_needed) { - memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8); - kvm_set_mmio_data(vcpu); - vcpu->mmio_read_completed = 1; - vcpu->mmio_needed = 0; - } - r = __vcpu_run(vcpu, kvm_run); -out: - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - - return r; -} - -struct kvm *kvm_arch_alloc_vm(void) -{ - - struct kvm *kvm; - uint64_t vm_base; - - BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); - - vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); - - if (!vm_base) - return NULL; - - memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); - kvm = (struct kvm *)(vm_base + - offsetof(struct kvm_vm_data, kvm_vm_struct)); - kvm->arch.vm_base = vm_base; - printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); - - return kvm; -} - -struct kvm_ia64_io_range { - unsigned long start; - unsigned long size; - unsigned long type; -}; - -static const struct kvm_ia64_io_range io_ranges[] = { - {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, - {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, - {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, - {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, - {PIB_START, PIB_SIZE, GPFN_PIB}, -}; - -static void kvm_build_io_pmt(struct kvm *kvm) -{ - unsigned long i, j; - - /* Mark I/O ranges */ - for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); - i++) { - for (j = io_ranges[i].start; - j < io_ranges[i].start + io_ranges[i].size; - j += PAGE_SIZE) - kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, - io_ranges[i].type, 0); - } - -} - -/*Use unused rids to virtualize guest rid.*/ -#define GUEST_PHYSICAL_RR0 0x1739 -#define GUEST_PHYSICAL_RR4 0x2739 -#define VMM_INIT_RR 0x1660 - -int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) -{ - BUG_ON(!kvm); - - if (type) - return -EINVAL; - - kvm->arch.is_sn2 = ia64_platform_is("sn2"); - - kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; - kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; - kvm->arch.vmm_init_rr = VMM_INIT_RR; - - /* - *Fill P2M entries for MMIO/IO ranges - */ - kvm_build_io_pmt(kvm); - - INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); - - /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ - set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); - - return 0; -} - -static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, - struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) 
{ - case KVM_IRQCHIP_IOAPIC: - r = kvm_get_ioapic(kvm, &chip->chip.ioapic); - break; - default: - r = -EINVAL; - break; - } - return r; -} - -static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) { - case KVM_IRQCHIP_IOAPIC: - r = kvm_set_ioapic(kvm, &chip->chip.ioapic); - break; - default: - r = -EINVAL; - break; - } - return r; -} - -#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x - -int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - int i; - - for (i = 0; i < 16; i++) { - vpd->vgr[i] = regs->vpd.vgr[i]; - vpd->vbgr[i] = regs->vpd.vbgr[i]; - } - for (i = 0; i < 128; i++) - vpd->vcr[i] = regs->vpd.vcr[i]; - vpd->vhpi = regs->vpd.vhpi; - vpd->vnat = regs->vpd.vnat; - vpd->vbnat = regs->vpd.vbnat; - vpd->vpsr = regs->vpd.vpsr; - - vpd->vpr = regs->vpd.vpr; - - memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); - - RESTORE_REGS(mp_state); - RESTORE_REGS(vmm_rr); - memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); - memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); - RESTORE_REGS(itr_regions); - RESTORE_REGS(dtr_regions); - RESTORE_REGS(tc_regions); - RESTORE_REGS(irq_check); - RESTORE_REGS(itc_check); - RESTORE_REGS(timer_check); - RESTORE_REGS(timer_pending); - RESTORE_REGS(last_itc); - for (i = 0; i < 8; i++) { - vcpu->arch.vrr[i] = regs->vrr[i]; - vcpu->arch.ibr[i] = regs->ibr[i]; - vcpu->arch.dbr[i] = regs->dbr[i]; - } - for (i = 0; i < 4; i++) - vcpu->arch.insvc[i] = regs->insvc[i]; - RESTORE_REGS(xtp); - RESTORE_REGS(metaphysical_rr0); - RESTORE_REGS(metaphysical_rr4); - RESTORE_REGS(metaphysical_saved_rr0); - RESTORE_REGS(metaphysical_saved_rr4); - RESTORE_REGS(fp_psr); - RESTORE_REGS(saved_gp); - - vcpu->arch.irq_new_pending = 1; - vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); - set_bit(KVM_REQ_RESUME, &vcpu->requests); - - return 0; -} - -int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, - bool line_status) -{ - if (!irqchip_in_kernel(kvm)) - return -ENXIO; - - irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, - irq_event->irq, irq_event->level, - line_status); - return 0; -} - -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm *kvm = filp->private_data; - void __user *argp = (void __user *)arg; - int r = -ENOTTY; - - switch (ioctl) { - case KVM_CREATE_IRQCHIP: - r = -EFAULT; - r = kvm_ioapic_init(kvm); - if (r) - goto out; - r = kvm_setup_default_irq_routing(kvm); - if (r) { - mutex_lock(&kvm->slots_lock); - kvm_ioapic_destroy(kvm); - mutex_unlock(&kvm->slots_lock); - goto out; - } - break; - case KVM_GET_IRQCHIP: { - /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ - struct kvm_irqchip chip; - - r = -EFAULT; - if (copy_from_user(&chip, argp, sizeof chip)) - goto out; - r = -ENXIO; - if (!irqchip_in_kernel(kvm)) - goto out; - r = kvm_vm_ioctl_get_irqchip(kvm, &chip); - if (r) - goto out; - r = -EFAULT; - if (copy_to_user(argp, &chip, sizeof chip)) - goto out; - r = 0; - break; - } - case KVM_SET_IRQCHIP: { - /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ - struct kvm_irqchip chip; - - r = -EFAULT; - if (copy_from_user(&chip, argp, sizeof chip)) - goto out; - r = -ENXIO; - if (!irqchip_in_kernel(kvm)) - goto out; - r = kvm_vm_ioctl_set_irqchip(kvm, &chip); - if (r) - goto out; - r = 0; - break; - } - default: - ; - } -out: - return r; -} - -int 
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; - -} -int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - - return -EINVAL; -} - -static int kvm_alloc_vmm_area(void) -{ - if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { - kvm_vmm_base = __get_free_pages(GFP_KERNEL, - get_order(KVM_VMM_SIZE)); - if (!kvm_vmm_base) - return -ENOMEM; - - memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); - kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; - - printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", - kvm_vmm_base, kvm_vm_buffer); - } - - return 0; -} - -static void kvm_free_vmm_area(void) -{ - if (kvm_vmm_base) { - /*Zero this area before free to avoid bits leak!!*/ - memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); - free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); - kvm_vmm_base = 0; - kvm_vm_buffer = 0; - kvm_vsa_base = 0; - } -} - -static int vti_init_vpd(struct kvm_vcpu *vcpu) -{ - int i; - union cpuid3_t cpuid3; - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (IS_ERR(vpd)) - return PTR_ERR(vpd); - - /* CPUID init */ - for (i = 0; i < 5; i++) - vpd->vcpuid[i] = ia64_get_cpuid(i); - - /* Limit the CPUID number to 5 */ - cpuid3.value = vpd->vcpuid[3]; - cpuid3.number = 4; /* 5 - 1 */ - vpd->vcpuid[3] = cpuid3.value; - - /*Set vac and vdc fields*/ - vpd->vac.a_from_int_cr = 1; - vpd->vac.a_to_int_cr = 1; - vpd->vac.a_from_psr = 1; - vpd->vac.a_from_cpuid = 1; - vpd->vac.a_cover = 1; - vpd->vac.a_bsw = 1; - vpd->vac.a_int = 1; - vpd->vdc.d_vmsw = 1; - - /*Set virtual buffer*/ - vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; - - return 0; -} - -static int vti_create_vp(struct kvm_vcpu *vcpu) -{ - long ret; - struct vpd *vpd = vcpu->arch.vpd; - unsigned long vmm_ivt; - - vmm_ivt = kvm_vmm_info->vmm_ivt; - - printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); - - ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); - - if (ret) { - printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); - return -EINVAL; - } - return 0; -} - -static void init_ptce_info(struct kvm_vcpu *vcpu) -{ - ia64_ptce_info_t ptce = {0}; - - ia64_get_ptce(&ptce); - vcpu->arch.ptce_base = ptce.base; - vcpu->arch.ptce_count[0] = ptce.count[0]; - vcpu->arch.ptce_count[1] = ptce.count[1]; - vcpu->arch.ptce_stride[0] = ptce.stride[0]; - vcpu->arch.ptce_stride[1] = ptce.stride[1]; -} - -static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) -{ - struct hrtimer *p_ht = &vcpu->arch.hlt_timer; - - if (hrtimer_cancel(p_ht)) - hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); -} - -static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) -{ - struct kvm_vcpu *vcpu; - wait_queue_head_t *q; - - vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); - q = &vcpu->wq; - - if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) - goto out; - - if (waitqueue_active(q)) - wake_up_interruptible(q); - -out: - vcpu->arch.timer_fired = 1; - vcpu->arch.timer_check = 1; - return HRTIMER_NORESTART; -} - -#define PALE_RESET_ENTRY 0x80000000ffffffb0UL - -bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) -{ - return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); -} - -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - struct kvm_vcpu *v; - int r; - int i; - long itc_offset; - struct kvm *kvm = vcpu->kvm; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - union context *p_ctx = &vcpu->arch.guest; - struct 
kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); - - /*Init vcpu context for first run.*/ - if (IS_ERR(vmm_vcpu)) - return PTR_ERR(vmm_vcpu); - - if (kvm_vcpu_is_bsp(vcpu)) { - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - - /*Set entry address for first run.*/ - regs->cr_iip = PALE_RESET_ENTRY; - - /*Initialize itc offset for vcpus*/ - itc_offset = 0UL - kvm_get_itc(vcpu); - for (i = 0; i < KVM_MAX_VCPUS; i++) { - v = (struct kvm_vcpu *)((char *)vcpu + - sizeof(struct kvm_vcpu_data) * i); - v->arch.itc_offset = itc_offset; - v->arch.last_itc = 0; - } - } else - vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; - - r = -ENOMEM; - vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); - if (!vcpu->arch.apic) - goto out; - vcpu->arch.apic->vcpu = vcpu; - - p_ctx->gr[1] = 0; - p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); - p_ctx->gr[13] = (unsigned long)vmm_vcpu; - p_ctx->psr = 0x1008522000UL; - p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ - p_ctx->caller_unat = 0; - p_ctx->pr = 0x0; - p_ctx->ar[36] = 0x0; /*unat*/ - p_ctx->ar[19] = 0x0; /*rnat*/ - p_ctx->ar[18] = (unsigned long)vmm_vcpu + - ((sizeof(struct kvm_vcpu)+15) & ~15); - p_ctx->ar[64] = 0x0; /*pfs*/ - p_ctx->cr[0] = 0x7e04UL; - p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; - p_ctx->cr[8] = 0x3c; - - /*Initialize region register*/ - p_ctx->rr[0] = 0x30; - p_ctx->rr[1] = 0x30; - p_ctx->rr[2] = 0x30; - p_ctx->rr[3] = 0x30; - p_ctx->rr[4] = 0x30; - p_ctx->rr[5] = 0x30; - p_ctx->rr[7] = 0x30; - - /*Initialize branch register 0*/ - p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; - - vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; - vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; - vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; - - hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - vcpu->arch.hlt_timer.function = hlt_timer_fn; - - vcpu->arch.last_run_cpu = -1; - vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); - vcpu->arch.vsa_base = kvm_vsa_base; - vcpu->arch.__gp = kvm_vmm_gp; - vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); - vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); - vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); - init_ptce_info(vcpu); - - r = 0; -out: - return r; -} - -static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) -{ - unsigned long psr; - int r; - - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - if (r) - goto fail; - r = kvm_vcpu_init(vcpu, vcpu->kvm, id); - if (r) - goto fail; - - r = vti_init_vpd(vcpu); - if (r) { - printk(KERN_DEBUG"kvm: vpd init error!!\n"); - goto uninit; - } - - r = vti_create_vp(vcpu); - if (r) - goto uninit; - - kvm_purge_vmm_mapping(vcpu); - - return 0; -uninit: - kvm_vcpu_uninit(vcpu); -fail: - return r; -} - -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, - unsigned int id) -{ - struct kvm_vcpu *vcpu; - unsigned long vm_base = kvm->arch.vm_base; - int r; - int cpu; - - BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); - - r = -EINVAL; - if (id >= KVM_MAX_VCPUS) { - printk(KERN_ERR"kvm: Can't configure vcpus > %ld", - KVM_MAX_VCPUS); - goto fail; - } - - r = -ENOMEM; - if (!vm_base) { - printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); - goto fail; - } - vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, - vcpu_data[id].vcpu_struct)); - vcpu->kvm = kvm; - - cpu = get_cpu(); - r = vti_vcpu_setup(vcpu, id); - put_cpu(); - - if (r) { - printk(KERN_DEBUG"kvm: vcpu_setup 
error!!\n"); - goto fail; - } - - return vcpu; -fail: - return ERR_PTR(r); -} - -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - -void kvm_arch_free_vm(struct kvm *kvm) -{ - unsigned long vm_base = kvm->arch.vm_base; - - if (vm_base) { - memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); - free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); - } - -} - -static void kvm_release_vm_pages(struct kvm *kvm) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int j; - - slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { - for (j = 0; j < memslot->npages; j++) { - if (memslot->rmap[j]) - put_page((struct page *)memslot->rmap[j]); - } - } -} - -void kvm_arch_destroy_vm(struct kvm *kvm) -{ - kvm_iommu_unmap_guest(kvm); - kvm_free_all_assigned_devices(kvm); - kfree(kvm->arch.vioapic); - kvm_release_vm_pages(kvm); -} - -void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - if (cpu != vcpu->cpu) { - vcpu->cpu = cpu; - if (vcpu->arch.ht_active) - kvm_migrate_hlt_timer(vcpu); - } -} - -#define SAVE_REGS(_x) regs->_x = vcpu->arch._x - -int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - int i; - - vcpu_load(vcpu); - - for (i = 0; i < 16; i++) { - regs->vpd.vgr[i] = vpd->vgr[i]; - regs->vpd.vbgr[i] = vpd->vbgr[i]; - } - for (i = 0; i < 128; i++) - regs->vpd.vcr[i] = vpd->vcr[i]; - regs->vpd.vhpi = vpd->vhpi; - regs->vpd.vnat = vpd->vnat; - regs->vpd.vbnat = vpd->vbnat; - regs->vpd.vpsr = vpd->vpsr; - regs->vpd.vpr = vpd->vpr; - - memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); - - SAVE_REGS(mp_state); - SAVE_REGS(vmm_rr); - memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); - memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); - SAVE_REGS(itr_regions); - SAVE_REGS(dtr_regions); - SAVE_REGS(tc_regions); - SAVE_REGS(irq_check); - SAVE_REGS(itc_check); - SAVE_REGS(timer_check); - SAVE_REGS(timer_pending); - SAVE_REGS(last_itc); - for (i = 0; i < 8; i++) { - regs->vrr[i] = vcpu->arch.vrr[i]; - regs->ibr[i] = vcpu->arch.ibr[i]; - regs->dbr[i] = vcpu->arch.dbr[i]; - } - for (i = 0; i < 4; i++) - regs->insvc[i] = vcpu->arch.insvc[i]; - regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); - SAVE_REGS(xtp); - SAVE_REGS(metaphysical_rr0); - SAVE_REGS(metaphysical_rr4); - SAVE_REGS(metaphysical_saved_rr0); - SAVE_REGS(metaphysical_saved_rr4); - SAVE_REGS(fp_psr); - SAVE_REGS(saved_gp); - - vcpu_put(vcpu); - return 0; -} - -int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, - struct kvm_ia64_vcpu_stack *stack) -{ - memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); - return 0; -} - -int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, - struct kvm_ia64_vcpu_stack *stack) -{ - memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), - sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); - - vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - - hrtimer_cancel(&vcpu->arch.hlt_timer); - 
kfree(vcpu->arch.apic); -} - -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm_vcpu *vcpu = filp->private_data; - void __user *argp = (void __user *)arg; - struct kvm_ia64_vcpu_stack *stack = NULL; - long r; - - switch (ioctl) { - case KVM_IA64_VCPU_GET_STACK: { - struct kvm_ia64_vcpu_stack __user *user_stack; - void __user *first_p = argp; - - r = -EFAULT; - if (copy_from_user(&user_stack, first_p, sizeof(void *))) - goto out; - - if (!access_ok(VERIFY_WRITE, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " - "Illegal user destination address for stack\n"); - goto out; - } - stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); - if (!stack) { - r = -ENOMEM; - goto out; - } - - r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); - if (r) - goto out; - - if (copy_to_user(user_stack, stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - r = -EFAULT; - goto out; - } - - break; - } - case KVM_IA64_VCPU_SET_STACK: { - struct kvm_ia64_vcpu_stack __user *user_stack; - void __user *first_p = argp; - - r = -EFAULT; - if (copy_from_user(&user_stack, first_p, sizeof(void *))) - goto out; - - if (!access_ok(VERIFY_READ, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " - "Illegal user address for stack\n"); - goto out; - } - stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); - if (!stack) { - r = -ENOMEM; - goto out; - } - if (copy_from_user(stack, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) - goto out; - - r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); - break; - } - - default: - r = -EINVAL; - } - -out: - kfree(stack); - return r; -} - -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} - -int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, - unsigned long npages) -{ - return 0; -} - -int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) -{ - unsigned long i; - unsigned long pfn; - int npages = memslot->npages; - unsigned long base_gfn = memslot->base_gfn; - - if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) - return -ENOMEM; - - for (i = 0; i < npages; i++) { - pfn = gfn_to_pfn(kvm, base_gfn + i); - if (!kvm_is_reserved_pfn(pfn)) { - kvm_set_pmt_entry(kvm, base_gfn + i, - pfn << PAGE_SHIFT, - _PAGE_AR_RWX | _PAGE_MA_WB); - memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); - } else { - kvm_set_pmt_entry(kvm, base_gfn + i, - GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), - _PAGE_MA_UC); - memslot->rmap[i] = 0; - } - } - - return 0; -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ - kvm_flush_remote_tlbs(kvm); -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - kvm_arch_flush_shadow_all(); -} - -long kvm_arch_dev_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_vcpu_uninit(vcpu); -} - -static int vti_cpu_has_kvm_support(void) -{ - long avail = 1, status = 1, control = 1; - long ret; - - ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); - if (ret) - goto out; - - if (!(avail & PAL_PROC_VM_BIT)) - goto out; - - printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); - - ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); - if (ret) - goto out; - printk(KERN_DEBUG"kvm: VM Buffer 
Size:0x%lx\n", kvm_vm_buffer_size); - - if (!(vp_env_info & VP_OPCODE)) { - printk(KERN_WARNING"kvm: No opcode ability on hardware, " - "vm_env_info:0x%lx\n", vp_env_info); - } - - return 1; -out: - return 0; -} - - -/* - * On SN2, the ITC isn't stable, so copy in fast path code to use the - * SN2 RTC, replacing the ITC based default verion. - */ -static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, - struct module *module) -{ - unsigned long new_ar, new_ar_sn2; - unsigned long module_base; - - if (!ia64_platform_is("sn2")) - return; - - module_base = (unsigned long)module->module_core; - - new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; - new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; - - printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " - "as source\n"); - - /* - * Copy the SN2 version of mov_ar into place. They are both - * the same size, so 6 bundles is sufficient (6 * 0x10). - */ - memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); -} - -static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, - struct module *module) -{ - unsigned long module_base; - unsigned long vmm_size; - - unsigned long vmm_offset, func_offset, fdesc_offset; - struct fdesc *p_fdesc; - - BUG_ON(!module); - - if (!kvm_vmm_base) { - printk("kvm: kvm area hasn't been initialized yet!!\n"); - return -EFAULT; - } - - /*Calculate new position of relocated vmm module.*/ - module_base = (unsigned long)module->module_core; - vmm_size = module->core_size; - if (unlikely(vmm_size > KVM_VMM_SIZE)) - return -EFAULT; - - memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); - kvm_patch_vmm(vmm_info, module); - kvm_flush_icache(kvm_vmm_base, vmm_size); - - /*Recalculate kvm_vmm_info based on new VMM*/ - vmm_offset = vmm_info->vmm_ivt - module_base; - kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; - printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", - kvm_vmm_info->vmm_ivt); - - fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; - kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + - fdesc_offset); - func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; - p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); - p_fdesc->ip = KVM_VMM_BASE + func_offset; - p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); - - printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", - KVM_VMM_BASE+func_offset); - - fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; - kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + - fdesc_offset); - func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; - p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); - p_fdesc->ip = KVM_VMM_BASE + func_offset; - p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); - - kvm_vmm_gp = p_fdesc->gp; - - printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", - kvm_vmm_info->vmm_entry); - printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", - KVM_VMM_BASE + func_offset); - - return 0; -} - -int kvm_arch_init(void *opaque) -{ - int r; - struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; - - if (!vti_cpu_has_kvm_support()) { - printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); - r = -EOPNOTSUPP; - goto out; - } - - if (kvm_vmm_info) { - printk(KERN_ERR "kvm: Already loaded VMM module!\n"); - r = -EEXIST; - goto out; - } - - r = -ENOMEM; - kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); - if (!kvm_vmm_info) - goto out; - - if (kvm_alloc_vmm_area()) - 
goto out_free0; - - r = kvm_relocate_vmm(vmm_info, vmm_info->module); - if (r) - goto out_free1; - - return 0; - -out_free1: - kvm_free_vmm_area(); -out_free0: - kfree(kvm_vmm_info); -out: - return r; -} - -void kvm_arch_exit(void) -{ - kvm_free_vmm_area(); - kfree(kvm_vmm_info); - kvm_vmm_info = NULL; -} - -static void kvm_ia64_sync_dirty_log(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - int i; - long base; - unsigned long n; - unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + - offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); - - n = kvm_dirty_bitmap_bytes(memslot); - base = memslot->base_gfn / BITS_PER_LONG; - - spin_lock(&kvm->arch.dirty_log_lock); - for (i = 0; i < n/sizeof(long); ++i) { - memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; - dirty_bitmap[base + i] = 0; - } - spin_unlock(&kvm->arch.dirty_log_lock); -} - -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) -{ - int r; - unsigned long n; - struct kvm_memory_slot *memslot; - int is_dirty = 0; - - mutex_lock(&kvm->slots_lock); - - r = -EINVAL; - if (log->slot >= KVM_USER_MEM_SLOTS) - goto out; - - memslot = id_to_memslot(kvm->memslots, log->slot); - r = -ENOENT; - if (!memslot->dirty_bitmap) - goto out; - - kvm_ia64_sync_dirty_log(kvm, memslot); - r = kvm_get_dirty_log(kvm, log, &is_dirty); - if (r) - goto out; - - /* If nothing is dirty, don't bother messing with page tables. */ - if (is_dirty) { - kvm_flush_remote_tlbs(kvm); - n = kvm_dirty_bitmap_bytes(memslot); - memset(memslot->dirty_bitmap, 0, n); - } - r = 0; -out: - mutex_unlock(&kvm->slots_lock); - return r; -} - -int kvm_arch_hardware_setup(void) -{ - return 0; -} - -int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) -{ - return __apic_accept_irq(vcpu, irq->vector); -} - -int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) -{ - return apic->vcpu->vcpu_id == dest; -} - -int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) -{ - return 0; -} - -int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) -{ - return vcpu1->arch.xtp - vcpu2->arch.xtp; -} - -int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, - int short_hand, int dest, int dest_mode) -{ - struct kvm_lapic *target = vcpu->arch.apic; - return (dest_mode == 0) ? 
- kvm_apic_match_physical_addr(target, dest) : - kvm_apic_match_logical_addr(target, dest); -} - -static int find_highest_bits(int *dat) -{ - u32 bits, bitnum; - int i; - - /* loop for all 256 bits */ - for (i = 7; i >= 0 ; i--) { - bits = dat[i]; - if (bits) { - bitnum = fls(bits); - return i * 32 + bitnum - 1; - } - } - - return -1; -} - -int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (vpd->irr[0] & (1UL << NMI_VECTOR)) - return NMI_VECTOR; - if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) - return ExtINT_VECTOR; - - return find_highest_bits((int *)&vpd->irr[0]); -} - -int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.timer_fired; -} - -int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) -{ - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || - (kvm_highest_pending_irq(vcpu) != -1); -} - -int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) -{ - return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)); -} - -int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - mp_state->mp_state = vcpu->arch.mp_state; - return 0; -} - -static int vcpu_reset(struct kvm_vcpu *vcpu) -{ - int r; - long psr; - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - if (r) - goto fail; - - vcpu->arch.launched = 0; - kvm_arch_vcpu_uninit(vcpu); - r = kvm_arch_vcpu_init(vcpu); - if (r) - goto fail; - - kvm_purge_vmm_mapping(vcpu); - r = 0; -fail: - return r; -} - -int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - int r = 0; - - vcpu->arch.mp_state = mp_state->mp_state; - if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) - r = vcpu_reset(vcpu); - return r; -} diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c deleted file mode 100644 index cb548ee9fcae..000000000000 --- a/arch/ia64/kvm/kvm_fw.c +++ /dev/null @@ -1,674 +0,0 @@ -/* - * PAL/SAL call delegation - * - * Copyright (c) 2004 Li Susie <susie.li@intel.com> - * Copyright (c) 2005 Yu Ke <ke.yu@intel.com> - * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -#include <linux/kvm_host.h> -#include <linux/smp.h> -#include <asm/sn/addrs.h> -#include <asm/sn/clksupport.h> -#include <asm/sn/shub_mmr.h> - -#include "vti.h" -#include "misc.h" - -#include <asm/pal.h> -#include <asm/sal.h> -#include <asm/tlb.h> - -/* - * Handy macros to make sure that the PAL return values start out - * as something meaningful. 
- */ -#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \ - { \ - x.status = PAL_STATUS_UNIMPLEMENTED; \ - x.v0 = 0; \ - x.v1 = 0; \ - x.v2 = 0; \ - } - -#define INIT_PAL_STATUS_SUCCESS(x) \ - { \ - x.status = PAL_STATUS_SUCCESS; \ - x.v0 = 0; \ - x.v1 = 0; \ - x.v2 = 0; \ - } - -static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu, - u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) { - struct exit_ctl_data *p; - - if (vcpu) { - p = &vcpu->arch.exit_data; - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - *gr28 = p->u.pal_data.gr28; - *gr29 = p->u.pal_data.gr29; - *gr30 = p->u.pal_data.gr30; - *gr31 = p->u.pal_data.gr31; - return ; - } - } - printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n"); -} - -static void set_pal_result(struct kvm_vcpu *vcpu, - struct ia64_pal_retval result) { - - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - p->u.pal_data.ret = result; - return ; - } - INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret); -} - -static void set_sal_result(struct kvm_vcpu *vcpu, - struct sal_ret_values result) { - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - p->u.sal_data.ret = result; - return ; - } - printk(KERN_WARNING"Failed to set sal result!!\n"); -} - -struct cache_flush_args { - u64 cache_type; - u64 operation; - u64 progress; - long status; -}; - -cpumask_t cpu_cache_coherent_map; - -static void remote_pal_cache_flush(void *data) -{ - struct cache_flush_args *args = data; - long status; - u64 progress = args->progress; - - status = ia64_pal_cache_flush(args->cache_type, args->operation, - &progress, NULL); - if (status != 0) - args->status = status; -} - -static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) -{ - u64 gr28, gr29, gr30, gr31; - struct ia64_pal_retval result = {0, 0, 0, 0}; - struct cache_flush_args args = {0, 0, 0, 0}; - long psr; - - gr28 = gr29 = gr30 = gr31 = 0; - kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31); - - if (gr31 != 0) - printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu); - - /* Always call Host Pal in int=1 */ - gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS; - args.cache_type = gr29; - args.operation = gr30; - smp_call_function(remote_pal_cache_flush, - (void *)&args, 1); - if (args.status != 0) - printk(KERN_ERR"pal_cache_flush error!," - "status:0x%lx\n", args.status); - /* - * Call Host PAL cache flush - * Clear psr.ic when call PAL_CACHE_FLUSH - */ - local_irq_save(psr); - result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1, - &result.v0); - local_irq_restore(psr); - if (result.status != 0) - printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld" - "in1:%lx,in2:%lx\n", - vcpu, result.status, gr29, gr30); - -#if 0 - if (gr29 == PAL_CACHE_TYPE_COHERENT) { - cpus_setall(vcpu->arch.cache_coherent_map); - cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map); - cpus_setall(cpu_cache_coherent_map); - cpu_clear(vcpu->cpu, cpu_cache_coherent_map); - } -#endif - return result; -} - -struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0); - return result; -} - -static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0); - - /* - * PAL_FREQ_BASE may not be implemented in some platforms, - * call SAL instead. 
- */ - if (result.v0 == 0) { - result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, - &result.v0, - &result.v1); - result.v2 = 0; - } - - return result; -} - -/* - * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2 - * RTC is used instead. This function patches the ratios from SAL - * to match the RTC before providing them to the guest. - */ -static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result) -{ - struct pal_freq_ratio *ratio; - unsigned long sal_freq, sal_drift, factor; - - result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, - &sal_freq, &sal_drift); - ratio = (struct pal_freq_ratio *)&result->v2; - factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) / - sn_rtc_cycles_per_second; - - ratio->num = 3; - ratio->den = factor; -} - -static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0); - - if (vcpu->kvm->arch.is_sn2) - sn2_patch_itc_freq_ratios(&result); - - return result; -} - -static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - - INIT_PAL_STATUS_UNIMPLEMENTED(result); - return result; -} - -static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - INIT_PAL_STATUS_SUCCESS(result); - return result; -} - -static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result = {0, 0, 0, 0}; - long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_proc_get_features(&result.v0, &result.v1, - &result.v2, in2); - - return result; -} - -static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result = {0, 0, 0, 0}; - long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); - - return result; -} - -static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) -{ - - pal_cache_config_info_t ci; - long status; - unsigned long in0, in1, in2, in3, r9, r10; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - status = ia64_pal_cache_config_info(in1, in2, &ci); - r9 = ci.pcci_info_1.pcci1_data; - r10 = ci.pcci_info_2.pcci2_data; - return ((struct ia64_pal_retval){status, r9, r10, 0}); -} - -#define GUEST_IMPL_VA_MSB 59 -#define GUEST_RID_BITS 18 - -static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) -{ - - pal_vm_info_1_u_t vminfo1; - pal_vm_info_2_u_t vminfo2; - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0); - if (!result.status) { - vminfo1.pvi1_val = result.v0; - vminfo1.pal_vm_info_1_s.max_itr_entry = 8; - vminfo1.pal_vm_info_1_s.max_dtr_entry = 8; - result.v0 = vminfo1.pvi1_val; - vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB; - vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS; - result.v1 = vminfo2.pvi2_val; - } - - return result; -} - -static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - unsigned long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - - result.status = ia64_pal_vm_info(in1, in2, - (pal_tc_info_u_t *)&result.v1, &result.v2); - - return result; -} - -static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) -{ - u64 index = 0; - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_PAL_CALL) - index = 
p->u.pal_data.gr28; - - return index; -} - -static void prepare_for_halt(struct kvm_vcpu *vcpu) -{ - vcpu->arch.timer_pending = 1; - vcpu->arch.timer_fired = 0; -} - -static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) -{ - long status; - unsigned long in0, in1, in2, in3, r9; - unsigned long pm_buffer[16]; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - status = ia64_pal_perf_mon_info(pm_buffer, - (pal_perf_mon_info_u_t *) &r9); - if (status != 0) { - printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status); - } else { - if (in1) - memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); - else { - status = PAL_STATUS_EINVAL; - printk(KERN_WARNING"Invalid parameters " - "for PAL call:0x%lx!\n", in0); - } - } - return (struct ia64_pal_retval){status, r9, 0, 0}; -} - -static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) -{ - unsigned long in0, in1, in2, in3; - long status; - unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) - | (1UL << 61) | (1UL << 60); - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - if (in1) { - memcpy((void *)in1, &res, sizeof(res)); - status = 0; - } else{ - status = PAL_STATUS_EINVAL; - printk(KERN_WARNING"Invalid parameters " - "for PAL call:0x%lx!\n", in0); - } - - return (struct ia64_pal_retval){status, 0, 0, 0}; -} - -static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) -{ - unsigned long r9; - long status; - - status = ia64_pal_mem_attrib(&r9); - - return (struct ia64_pal_retval){status, r9, 0, 0}; -} - -static void remote_pal_prefetch_visibility(void *v) -{ - s64 trans_type = (s64)v; - ia64_pal_prefetch_visibility(trans_type); -} - -static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result = {0, 0, 0, 0}; - unsigned long in0, in1, in2, in3; - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_prefetch_visibility(in1); - if (result.status == 0) { - /* Must be performed on all remote processors - in the coherence domain. 
*/ - smp_call_function(remote_pal_prefetch_visibility, - (void *)in1, 1); - /* Unnecessary on remote processor for other vcpus!*/ - result.status = 1; - } - return result; -} - -static void remote_pal_mc_drain(void *v) -{ - ia64_pal_mc_drain(); -} - -static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result = {0, 0, 0, 0}; - unsigned long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - - if (in1 == 0 && in2) { - char brand_info[128]; - result.status = ia64_pal_get_brand_info(brand_info); - if (result.status == PAL_STATUS_SUCCESS) - memcpy((void *)in2, brand_info, 128); - } else { - result.status = PAL_STATUS_REQUIRES_MEMORY; - printk(KERN_WARNING"Invalid parameters for " - "PAL call:0x%lx!\n", in0); - } - - return result; -} - -int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - - u64 gr28; - struct ia64_pal_retval result; - int ret = 1; - - gr28 = kvm_get_pal_call_index(vcpu); - switch (gr28) { - case PAL_CACHE_FLUSH: - result = pal_cache_flush(vcpu); - break; - case PAL_MEM_ATTRIB: - result = pal_mem_attrib(vcpu); - break; - case PAL_CACHE_SUMMARY: - result = pal_cache_summary(vcpu); - break; - case PAL_PERF_MON_INFO: - result = pal_perf_mon_info(vcpu); - break; - case PAL_HALT_INFO: - result = pal_halt_info(vcpu); - break; - case PAL_HALT_LIGHT: - { - INIT_PAL_STATUS_SUCCESS(result); - prepare_for_halt(vcpu); - if (kvm_highest_pending_irq(vcpu) == -1) - ret = kvm_emulate_halt(vcpu); - } - break; - - case PAL_PREFETCH_VISIBILITY: - result = pal_prefetch_visibility(vcpu); - break; - case PAL_MC_DRAIN: - result.status = ia64_pal_mc_drain(); - /* FIXME: All vcpus likely call PAL_MC_DRAIN. - That causes the congestion. */ - smp_call_function(remote_pal_mc_drain, NULL, 1); - break; - - case PAL_FREQ_RATIOS: - result = pal_freq_ratios(vcpu); - break; - - case PAL_FREQ_BASE: - result = pal_freq_base(vcpu); - break; - - case PAL_LOGICAL_TO_PHYSICAL : - result = pal_logical_to_physica(vcpu); - break; - - case PAL_VM_SUMMARY : - result = pal_vm_summary(vcpu); - break; - - case PAL_VM_INFO : - result = pal_vm_info(vcpu); - break; - case PAL_PLATFORM_ADDR : - result = pal_platform_addr(vcpu); - break; - case PAL_CACHE_INFO: - result = pal_cache_info(vcpu); - break; - case PAL_PTCE_INFO: - INIT_PAL_STATUS_SUCCESS(result); - result.v1 = (1L << 32) | 1L; - break; - case PAL_REGISTER_INFO: - result = pal_register_info(vcpu); - break; - case PAL_VM_PAGE_SIZE: - result.status = ia64_pal_vm_page_size(&result.v0, - &result.v1); - break; - case PAL_RSE_INFO: - result.status = ia64_pal_rse_info(&result.v0, - (pal_hints_u_t *)&result.v1); - break; - case PAL_PROC_GET_FEATURES: - result = pal_proc_get_features(vcpu); - break; - case PAL_DEBUG_INFO: - result.status = ia64_pal_debug_info(&result.v0, - &result.v1); - break; - case PAL_VERSION: - result.status = ia64_pal_version( - (pal_version_u_t *)&result.v0, - (pal_version_u_t *)&result.v1); - break; - case PAL_FIXED_ADDR: - result.status = PAL_STATUS_SUCCESS; - result.v0 = vcpu->vcpu_id; - break; - case PAL_BRAND_INFO: - result = pal_get_brand_info(vcpu); - break; - case PAL_GET_PSTATE: - case PAL_CACHE_SHARED_INFO: - INIT_PAL_STATUS_UNIMPLEMENTED(result); - break; - default: - INIT_PAL_STATUS_UNIMPLEMENTED(result); - printk(KERN_WARNING"kvm: Unsupported pal call," - " index:0x%lx\n", gr28); - } - set_pal_result(vcpu, result); - return ret; -} - -static struct sal_ret_values sal_emulator(struct kvm *kvm, - long index, unsigned long in1, - unsigned long in2, unsigned 
long in3, - unsigned long in4, unsigned long in5, - unsigned long in6, unsigned long in7) -{ - unsigned long r9 = 0; - unsigned long r10 = 0; - long r11 = 0; - long status; - - status = 0; - switch (index) { - case SAL_FREQ_BASE: - status = ia64_sal_freq_base(in1, &r9, &r10); - break; - case SAL_PCI_CONFIG_READ: - printk(KERN_WARNING"kvm: Not allowed to call here!" - " SAL_PCI_CONFIG_READ\n"); - break; - case SAL_PCI_CONFIG_WRITE: - printk(KERN_WARNING"kvm: Not allowed to call here!" - " SAL_PCI_CONFIG_WRITE\n"); - break; - case SAL_SET_VECTORS: - if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) { - if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) { - status = -2; - } else { - kvm->arch.rdv_sal_data.boot_ip = in2; - kvm->arch.rdv_sal_data.boot_gp = in3; - } - printk("Rendvous called! iip:%lx\n\n", in2); - } else - printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu." - "ignored...\n", in1); - break; - case SAL_GET_STATE_INFO: - /* No more info. */ - status = -5; - r9 = 0; - break; - case SAL_GET_STATE_INFO_SIZE: - /* Return a dummy size. */ - status = 0; - r9 = 128; - break; - case SAL_CLEAR_STATE_INFO: - /* Noop. */ - break; - case SAL_MC_RENDEZ: - printk(KERN_WARNING - "kvm: called SAL_MC_RENDEZ. ignored...\n"); - break; - case SAL_MC_SET_PARAMS: - printk(KERN_WARNING - "kvm: called SAL_MC_SET_PARAMS.ignored!\n"); - break; - case SAL_CACHE_FLUSH: - if (1) { - /*Flush using SAL. - This method is faster but has a side - effect on other vcpu running on - this cpu. */ - status = ia64_sal_cache_flush(in1); - } else { - /*Maybe need to implement the method - without side effect!*/ - status = 0; - } - break; - case SAL_CACHE_INIT: - printk(KERN_WARNING - "kvm: called SAL_CACHE_INIT. ignored...\n"); - break; - case SAL_UPDATE_PAL: - printk(KERN_WARNING - "kvm: CALLED SAL_UPDATE_PAL. ignored...\n"); - break; - default: - printk(KERN_WARNING"kvm: called SAL_CALL with unknown index." - " index:%ld\n", index); - status = -1; - break; - } - return ((struct sal_ret_values) {status, r9, r10, r11}); -} - -static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, - u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){ - - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - *in0 = p->u.sal_data.in0; - *in1 = p->u.sal_data.in1; - *in2 = p->u.sal_data.in2; - *in3 = p->u.sal_data.in3; - *in4 = p->u.sal_data.in4; - *in5 = p->u.sal_data.in5; - *in6 = p->u.sal_data.in6; - *in7 = p->u.sal_data.in7; - return ; - } - *in0 = 0; -} - -void kvm_sal_emul(struct kvm_vcpu *vcpu) -{ - - struct sal_ret_values result; - u64 index, in1, in2, in3, in4, in5, in6, in7; - - kvm_get_sal_call_data(vcpu, &index, &in1, &in2, - &in3, &in4, &in5, &in6, &in7); - result = sal_emulator(vcpu->kvm, index, in1, in2, in3, - in4, in5, in6, in7); - set_sal_result(vcpu, result); -} diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c deleted file mode 100644 index f1268b8e6f9e..000000000000 --- a/arch/ia64/kvm/kvm_lib.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * kvm_lib.c: Compile some libraries for kvm-intel module. - * - * Just include kernel's library, and disable symbols export. - * Copyright (C) 2008, Intel Corporation. - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ -#undef CONFIG_MODULES -#include <linux/module.h> -#undef CONFIG_KALLSYMS -#undef EXPORT_SYMBOL -#undef EXPORT_SYMBOL_GPL -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#include "../../../lib/vsprintf.c" -#include "../../../lib/ctype.c" diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h deleted file mode 100644 index b2bcaa2787aa..000000000000 --- a/arch/ia64/kvm/kvm_minstate.h +++ /dev/null @@ -1,266 +0,0 @@ -/* - * kvm_minstate.h: min save macros - * Copyright (c) 2007, Intel Corporation. - * - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - - -#include <asm/asmmacro.h> -#include <asm/types.h> -#include <asm/kregs.h> -#include <asm/kvm_host.h> - -#include "asm-offsets.h" - -#define KVM_MINSTATE_START_SAVE_MIN \ - mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\ - ;; \ - mov.m r28 = ar.rnat; \ - addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ - ;; \ - lfetch.fault.excl.nt1 [r22]; \ - addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \ - mov r23 = ar.bspstore; /* save ar.bspstore */ \ - ;; \ - mov ar.bspstore = r22; /* switch to kernel RBS */\ - ;; \ - mov r18 = ar.bsp; \ - mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ - - - -#define KVM_MINSTATE_END_SAVE_MIN \ - bsw.1; /* switch back to bank 1 (must be last in insn group) */\ - ;; - - -#define PAL_VSA_SYNC_READ \ - /* begin to call pal vps sync_read */ \ -{.mii; \ - add r25 = VMM_VPD_BASE_OFFSET, r21; \ - nop 0x0; \ - mov r24=ip; \ - ;; \ -} \ -{.mmb \ - add r24=0x20, r24; \ - ld8 r25 = [r25]; /* read vpd base */ \ - br.cond.sptk kvm_vps_sync_read; /*call the service*/ \ - ;; \ -}; \ - - -#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21 - -/* - * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves - * the minimum state necessary that allows us to turn psr.ic back - * on. - * - * Assumed state upon entry: - * psr.ic: off - * r31: contains saved predicates (pr) - * - * Upon exit, the state is as follows: - * psr.ic: off - * r2 = points to &pt_regs.r16 - * r8 = contents of ar.ccv - * r9 = contents of ar.csd - * r10 = contents of ar.ssd - * r11 = FPSR_DEFAULT - * r12 = kernel sp (kernel virtual address) - * r13 = points to current task_struct (kernel virtual address) - * p15 = TRUE if psr.i is set in cr.ipsr - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: - * preserved - * - * Note that psr.ic is NOT turned on by this macro. This is so that - * we can pass interruption state as arguments to a handler. 
- */ - - -#define PT(f) (VMM_PT_REGS_##f##_OFFSET) - -#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ - KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ - mov r27 = ar.rsc; /* M */ \ - mov r20 = r1; /* A */ \ - mov r25 = ar.unat; /* M */ \ - mov r29 = cr.ipsr; /* M */ \ - mov r26 = ar.pfs; /* I */ \ - mov r18 = cr.isr; \ - COVER; /* B;; (or nothing) */ \ - ;; \ - tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \ - mov r1 = r16; \ -/* mov r21=r16; */ \ - /* switch from user to kernel RBS: */ \ - ;; \ - invala; /* M */ \ - SAVE_IFS; \ - ;; \ - KVM_MINSTATE_START_SAVE_MIN \ - adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \ - adds r16 = PT(CR_IPSR),r1; \ - ;; \ - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ - st8 [r16] = r29; /* save cr.ipsr */ \ - ;; \ - lfetch.fault.excl.nt1 [r17]; \ - tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \ - mov r29 = b0 \ - ;; \ - adds r16 = PT(R8),r1; /* initialize first base pointer */\ - adds r17 = PT(R9),r1; /* initialize second base pointer */\ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r8,16; \ -.mem.offset 8,0; st8.spill [r17] = r9,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r10,24; \ -.mem.offset 8,0; st8.spill [r17] = r11,24; \ - ;; \ - mov r9 = cr.iip; /* M */ \ - mov r10 = ar.fpsr; /* M */ \ - ;; \ - st8 [r16] = r9,16; /* save cr.iip */ \ - st8 [r17] = r30,16; /* save cr.ifs */ \ - sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \ - ;; \ - st8 [r16] = r25,16; /* save ar.unat */ \ - st8 [r17] = r26,16; /* save ar.pfs */ \ - shl r18 = r18,16; /* calu ar.rsc used for "loadrs" */\ - ;; \ - st8 [r16] = r27,16; /* save ar.rsc */ \ - st8 [r17] = r28,16; /* save ar.rnat */ \ - ;; /* avoid RAW on r16 & r17 */ \ - st8 [r16] = r23,16; /* save ar.bspstore */ \ - st8 [r17] = r31,16; /* save predicates */ \ - ;; \ - st8 [r16] = r29,16; /* save b0 */ \ - st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \ -.mem.offset 8,0; st8.spill [r17] = r12,16; \ - adds r12 = -16,r1; /* switch to kernel memory stack */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r13,16; \ -.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\ - mov r13 = r21; /* establish `current' */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r15,16; \ -.mem.offset 8,0; st8.spill [r17] = r14,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r2,16; \ -.mem.offset 8,0; st8.spill [r17] = r3,16; \ - adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \ - ;; \ - adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \ - adds r17 = VMM_VCPU_ISR_OFFSET,r13; \ - mov r26 = cr.iipa; \ - mov r27 = cr.isr; \ - ;; \ - st8 [r16] = r26; \ - st8 [r17] = r27; \ - ;; \ - EXTRA; \ - mov r8 = ar.ccv; \ - mov r9 = ar.csd; \ - mov r10 = ar.ssd; \ - movl r11 = FPSR_DEFAULT; /* L-unit */ \ - adds r17 = VMM_VCPU_GP_OFFSET,r13; \ - ;; \ - ld8 r1 = [r17];/* establish kernel global pointer */ \ - ;; \ - PAL_VSA_SYNC_READ \ - KVM_MINSTATE_END_SAVE_MIN - -/* - * SAVE_REST saves the remainder of pt_regs (with psr.ic on). - * - * Assumed state upon entry: - * psr.ic: on - * r2: points to &pt_regs.f6 - * r3: points to &pt_regs.f7 - * r8: contents of ar.ccv - * r9: contents of ar.csd - * r10: contents of ar.ssd - * r11: FPSR_DEFAULT - * - * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. 
- */ -#define KVM_SAVE_REST \ -.mem.offset 0,0; st8.spill [r2] = r16,16; \ -.mem.offset 8,0; st8.spill [r3] = r17,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r18,16; \ -.mem.offset 8,0; st8.spill [r3] = r19,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r20,16; \ -.mem.offset 8,0; st8.spill [r3] = r21,16; \ - mov r18=b6; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r22,16; \ -.mem.offset 8,0; st8.spill [r3] = r23,16; \ - mov r19 = b7; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r24,16; \ -.mem.offset 8,0; st8.spill [r3] = r25,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r26,16; \ -.mem.offset 8,0; st8.spill [r3] = r27,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r28,16; \ -.mem.offset 8,0; st8.spill [r3] = r29,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r30,16; \ -.mem.offset 8,0; st8.spill [r3] = r31,32; \ - ;; \ - mov ar.fpsr = r11; \ - st8 [r2] = r8,8; \ - adds r24 = PT(B6)-PT(F7),r3; \ - adds r25 = PT(B7)-PT(F7),r3; \ - ;; \ - st8 [r24] = r18,16; /* b6 */ \ - st8 [r25] = r19,16; /* b7 */ \ - adds r2 = PT(R4)-PT(F6),r2; \ - adds r3 = PT(R5)-PT(F7),r3; \ - ;; \ - st8 [r24] = r9; /* ar.csd */ \ - st8 [r25] = r10; /* ar.ssd */ \ - ;; \ - mov r18 = ar.unat; \ - adds r19 = PT(EML_UNAT)-PT(R4),r2; \ - ;; \ - st8 [r19] = r18; /* eml_unat */ \ - - -#define KVM_SAVE_EXTRA \ -.mem.offset 0,0; st8.spill [r2] = r4,16; \ -.mem.offset 8,0; st8.spill [r3] = r5,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r6,16; \ -.mem.offset 8,0; st8.spill [r3] = r7; \ - ;; \ - mov r26 = ar.unat; \ - ;; \ - st8 [r2] = r26;/* eml_unat */ \ - -#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,) -#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19) -#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, ) diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h deleted file mode 100644 index c5f92a926a9a..000000000000 --- a/arch/ia64/kvm/lapic.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __KVM_IA64_LAPIC_H -#define __KVM_IA64_LAPIC_H - -#include <linux/kvm_host.h> - -/* - * vlsapic - */ -struct kvm_lapic{ - struct kvm_vcpu *vcpu; - uint64_t insvc[4]; - uint64_t vhpi; - uint8_t xtp; - uint8_t pal_init_pending; - uint8_t pad[2]; -}; - -int kvm_create_lapic(struct kvm_vcpu *vcpu); -void kvm_free_lapic(struct kvm_vcpu *vcpu); - -int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest); -int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda); -int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, - int short_hand, int dest, int dest_mode); -int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); -int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); -#define kvm_apic_present(x) (true) -#define kvm_lapic_enabled(x) (true) - -#endif diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S deleted file mode 100644 index c04cdbe9f80f..000000000000 --- a/arch/ia64/kvm/memcpy.S +++ /dev/null @@ -1 +0,0 @@ -#include "../lib/memcpy.S" diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S deleted file mode 100644 index 83c3066d844a..000000000000 --- a/arch/ia64/kvm/memset.S +++ /dev/null @@ -1 +0,0 @@ -#include "../lib/memset.S" diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h deleted file mode 100644 index dd979e00b574..000000000000 --- a/arch/ia64/kvm/misc.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef __KVM_IA64_MISC_H -#define __KVM_IA64_MISC_H - -#include <linux/kvm_host.h> -/* - * misc.h - * Copyright (C) 2007, Intel Corporation. 
- * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -/* - *Return p2m base address at host side! - */ -static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) -{ - return (uint64_t *)(kvm->arch.vm_base + - offsetof(struct kvm_vm_data, kvm_p2m)); -} - -static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, - u64 paddr, u64 mem_flags) -{ - uint64_t *pmt_base = kvm_host_get_pmt(kvm); - unsigned long pte; - - pte = PAGE_ALIGN(paddr) | mem_flags; - pmt_base[gfn] = pte; -} - -/*Function for translating host address to guest address*/ - -static inline void *to_guest(struct kvm *kvm, void *addr) -{ - return (void *)((unsigned long)(addr) - kvm->arch.vm_base + - KVM_VM_DATA_BASE); -} - -/*Function for translating guest address to host address*/ - -static inline void *to_host(struct kvm *kvm, void *addr) -{ - return (void *)((unsigned long)addr - KVM_VM_DATA_BASE - + kvm->arch.vm_base); -} - -/* Get host context of the vcpu */ -static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu) -{ - union context *ctx = &vcpu->arch.host; - return to_guest(vcpu->kvm, ctx); -} - -/* Get guest context of the vcpu */ -static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu) -{ - union context *ctx = &vcpu->arch.guest; - return to_guest(vcpu->kvm, ctx); -} - -/* kvm get exit data from gvmm! */ -static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu) -{ - return &vcpu->arch.exit_data; -} - -/*kvm get vcpu ioreq for kvm module!*/ -static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p_ctl_data; - - if (vcpu) { - p_ctl_data = kvm_get_exit_data(vcpu); - if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION) - return &p_ctl_data->u.ioreq; - } - - return NULL; -} - -#endif diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c deleted file mode 100644 index f1e17d3d6cd9..000000000000 --- a/arch/ia64/kvm/mmio.c +++ /dev/null @@ -1,336 +0,0 @@ -/* - * mmio.c: MMIO emulation components. - * Copyright (c) 2004, Intel Corporation. - * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) - * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) - * - * Copyright (c) 2007 Intel Corporation KVM support. - * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include <linux/kvm_host.h> - -#include "vcpu.h" - -static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val) -{ - VLSAPIC_XTP(v) = val; -} - -/* - * LSAPIC OFFSET - */ -#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20)) -#define PIB_OFST_INTA 0x1E0000 -#define PIB_OFST_XTP 0x1E0008 - -/* - * execute write IPI op. - */ -static void vlsapic_write_ipi(struct kvm_vcpu *vcpu, - uint64_t addr, uint64_t data) -{ - struct exit_ctl_data *p = ¤t_vcpu->arch.exit_data; - unsigned long psr; - - local_irq_save(psr); - - p->exit_reason = EXIT_REASON_IPI; - p->u.ipi_data.addr.val = addr; - p->u.ipi_data.data.val = data; - vmm_transition(current_vcpu); - - local_irq_restore(psr); - -} - -void lsapic_write(struct kvm_vcpu *v, unsigned long addr, - unsigned long length, unsigned long val) -{ - addr &= (PIB_SIZE - 1); - - switch (addr) { - case PIB_OFST_INTA: - panic_vm(v, "Undefined write on PIB INTA\n"); - break; - case PIB_OFST_XTP: - if (length == 1) { - vlsapic_write_xtp(v, val); - } else { - panic_vm(v, "Undefined write on PIB XTP\n"); - } - break; - default: - if (PIB_LOW_HALF(addr)) { - /*Lower half */ - if (length != 8) - panic_vm(v, "Can't LHF write with size %ld!\n", - length); - else - vlsapic_write_ipi(v, addr, val); - } else { /*Upper half */ - panic_vm(v, "IPI-UHF write %lx\n", addr); - } - break; - } -} - -unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, - unsigned long length) -{ - uint64_t result = 0; - - addr &= (PIB_SIZE - 1); - - switch (addr) { - case PIB_OFST_INTA: - if (length == 1) /* 1 byte load */ - ; /* There is no i8259, there is no INTA access*/ - else - panic_vm(v, "Undefined read on PIB INTA\n"); - - break; - case PIB_OFST_XTP: - if (length == 1) { - result = VLSAPIC_XTP(v); - } else { - panic_vm(v, "Undefined read on PIB XTP\n"); - } - break; - default: - panic_vm(v, "Undefined addr access for lsapic!\n"); - break; - } - return result; -} - -static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, - u16 s, int ma, int dir) -{ - unsigned long iot; - struct exit_ctl_data *p = &vcpu->arch.exit_data; - unsigned long psr; - - iot = __gpfn_is_io(src_pa >> PAGE_SHIFT); - - local_irq_save(psr); - - /*Intercept the access for PIB range*/ - if (iot == GPFN_PIB) { - if (!dir) - lsapic_write(vcpu, src_pa, s, *dest); - else - *dest = lsapic_read(vcpu, src_pa, s); - goto out; - } - p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION; - p->u.ioreq.addr = src_pa; - p->u.ioreq.size = s; - p->u.ioreq.dir = dir; - if (dir == IOREQ_WRITE) - p->u.ioreq.data = *dest; - p->u.ioreq.state = STATE_IOREQ_READY; - vmm_transition(vcpu); - - if (p->u.ioreq.state == STATE_IORESP_READY) { - if (dir == IOREQ_READ) - /* it's necessary to ensure zero extending */ - *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); - } else - panic_vm(vcpu, "Unhandled mmio access returned!\n"); -out: - local_irq_restore(psr); - return ; -} - -/* - dir 1: read 0:write - inst_type 0:integer 1:floating point - */ -#define SL_INTEGER 0 /* store/load interger*/ -#define SL_FLOATING 1 /* store/load floating*/ - -void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) -{ - struct kvm_pt_regs *regs; - IA64_BUNDLE bundle; - int slot, dir = 0; - int inst_type = -1; - u16 size = 0; - u64 data, slot1a, slot1b, temp, update_reg; - s32 imm; - INST64 inst; - - regs = vcpu_regs(vcpu); - - if 
(fetch_code(vcpu, regs->cr_iip, &bundle)) { - /* if fetch code fail, return and try again */ - return; - } - slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; - if (!slot) - inst.inst = bundle.slot0; - else if (slot == 1) { - slot1a = bundle.slot1a; - slot1b = bundle.slot1b; - inst.inst = slot1a + (slot1b << 18); - } else if (slot == 2) - inst.inst = bundle.slot2; - - /* Integer Load/Store */ - if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) { - inst_type = SL_INTEGER; - size = (inst.M1.x6 & 0x3); - if ((inst.M1.x6 >> 2) > 0xb) { - /*write*/ - dir = IOREQ_WRITE; - data = vcpu_get_gr(vcpu, inst.M4.r2); - } else if ((inst.M1.x6 >> 2) < 0xb) { - /*read*/ - dir = IOREQ_READ; - } - } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) { - /* Integer Load + Reg update */ - inst_type = SL_INTEGER; - dir = IOREQ_READ; - size = (inst.M2.x6 & 0x3); - temp = vcpu_get_gr(vcpu, inst.M2.r3); - update_reg = vcpu_get_gr(vcpu, inst.M2.r2); - temp += update_reg; - vcpu_set_gr(vcpu, inst.M2.r3, temp, 0); - } else if (inst.M3.major == 5) { - /*Integer Load/Store + Imm update*/ - inst_type = SL_INTEGER; - size = (inst.M3.x6&0x3); - if ((inst.M5.x6 >> 2) > 0xb) { - /*write*/ - dir = IOREQ_WRITE; - data = vcpu_get_gr(vcpu, inst.M5.r2); - temp = vcpu_get_gr(vcpu, inst.M5.r3); - imm = (inst.M5.s << 31) | (inst.M5.i << 30) | - (inst.M5.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M5.r3, temp, 0); - - } else if ((inst.M3.x6 >> 2) < 0xb) { - /*read*/ - dir = IOREQ_READ; - temp = vcpu_get_gr(vcpu, inst.M3.r3); - imm = (inst.M3.s << 31) | (inst.M3.i << 30) | - (inst.M3.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M3.r3, temp, 0); - - } - } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B - && inst.M9.m == 0 && inst.M9.x == 0) { - /* Floating-point spill*/ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - vcpu_get_fpreg(vcpu, inst.M9.f2, &v); - /* Write high word. FIXME: this is a kludge! */ - v.u.bits[1] &= 0x3ffff; - mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, - ma, IOREQ_WRITE); - data = v.u.bits[0]; - size = 3; - } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { - /* Floating-point spill + Imm update */ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - vcpu_get_fpreg(vcpu, inst.M10.f2, &v); - temp = vcpu_get_gr(vcpu, inst.M10.r3); - imm = (inst.M10.s << 31) | (inst.M10.i << 30) | - (inst.M10.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); - - /* Write high word.FIXME: this is a kludge! */ - v.u.bits[1] &= 0x3ffff; - mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], - 8, ma, IOREQ_WRITE); - data = v.u.bits[0]; - size = 3; - } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { - /* Floating-point stf8 + Imm update */ - struct ia64_fpreg v; - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - size = 3; - vcpu_get_fpreg(vcpu, inst.M10.f2, &v); - data = v.u.bits[0]; /* Significand. 
*/ - temp = vcpu_get_gr(vcpu, inst.M10.r3); - imm = (inst.M10.s << 31) | (inst.M10.i << 30) | - (inst.M10.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); - } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c - && inst.M15.x6 <= 0x2f) { - temp = vcpu_get_gr(vcpu, inst.M15.r3); - imm = (inst.M15.s << 31) | (inst.M15.i << 30) | - (inst.M15.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M15.r3, temp, 0); - - vcpu_increment_iip(vcpu); - return; - } else if (inst.M12.major == 6 && inst.M12.m == 1 - && inst.M12.x == 1 && inst.M12.x6 == 1) { - /* Floating-point Load Pair + Imm ldfp8 M12*/ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_READ; - size = 8; /*ldfd*/ - mmio_access(vcpu, padr, &data, size, ma, dir); - v.u.bits[0] = data; - v.u.bits[1] = 0x1003E; - vcpu_set_fpreg(vcpu, inst.M12.f1, &v); - padr += 8; - mmio_access(vcpu, padr, &data, size, ma, dir); - v.u.bits[0] = data; - v.u.bits[1] = 0x1003E; - vcpu_set_fpreg(vcpu, inst.M12.f2, &v); - padr += 8; - vcpu_set_gr(vcpu, inst.M12.r3, padr, 0); - vcpu_increment_iip(vcpu); - return; - } else { - inst_type = -1; - panic_vm(vcpu, "Unsupported MMIO access instruction! " - "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", - bundle.i64[0], bundle.i64[1]); - } - - size = 1 << size; - if (dir == IOREQ_WRITE) { - mmio_access(vcpu, padr, &data, size, ma, dir); - } else { - mmio_access(vcpu, padr, &data, size, ma, dir); - if (inst_type == SL_INTEGER) - vcpu_set_gr(vcpu, inst.M1.r1, data, 0); - else - panic_vm(vcpu, "Unsupported instruction type!\n"); - - } - vcpu_increment_iip(vcpu); -} diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S deleted file mode 100644 index f793be3effff..000000000000 --- a/arch/ia64/kvm/optvfault.S +++ /dev/null @@ -1,1090 +0,0 @@ -/* - * arch/ia64/kvm/optvfault.S - * optimize virtualization fault handler - * - * Copyright (C) 2006 Intel Co - * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> - * Copyright (C) 2008 Intel Co - * Add the support for Tukwila processors. 
- * Xiantao Zhang <xiantao.zhang@intel.com> - */ - -#include <asm/asmmacro.h> -#include <asm/processor.h> -#include <asm/kvm_host.h> - -#include "vti.h" -#include "asm-offsets.h" - -#define ACCE_MOV_FROM_AR -#define ACCE_MOV_FROM_RR -#define ACCE_MOV_TO_RR -#define ACCE_RSM -#define ACCE_SSM -#define ACCE_MOV_TO_PSR -#define ACCE_THASH - -#define VMX_VPS_SYNC_READ \ - add r16=VMM_VPD_BASE_OFFSET,r21; \ - mov r17 = b0; \ - mov r18 = r24; \ - mov r19 = r25; \ - mov r20 = r31; \ - ;; \ -{.mii; \ - ld8 r16 = [r16]; \ - nop 0x0; \ - mov r24 = ip; \ - ;; \ -}; \ -{.mmb; \ - add r24=0x20, r24; \ - mov r25 =r16; \ - br.sptk.many kvm_vps_sync_read; \ -}; \ - mov b0 = r17; \ - mov r24 = r18; \ - mov r25 = r19; \ - mov r31 = r20 - -ENTRY(kvm_vps_entry) - adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21 - ;; - ld8 r29 = [r29] - ;; - add r29 = r29, r30 - ;; - mov b0 = r29 - br.sptk.many b0 -END(kvm_vps_entry) - -/* - * Inputs: - * r24 : return address - * r25 : vpd - * r29 : scratch - * - */ -GLOBAL_ENTRY(kvm_vps_sync_read) - movl r30 = PAL_VPS_SYNC_READ - ;; - br.sptk.many kvm_vps_entry -END(kvm_vps_sync_read) - -/* - * Inputs: - * r24 : return address - * r25 : vpd - * r29 : scratch - * - */ -GLOBAL_ENTRY(kvm_vps_sync_write) - movl r30 = PAL_VPS_SYNC_WRITE - ;; - br.sptk.many kvm_vps_entry -END(kvm_vps_sync_write) - -/* - * Inputs: - * r23 : pr - * r24 : guest b0 - * r25 : vpd - * - */ -GLOBAL_ENTRY(kvm_vps_resume_normal) - movl r30 = PAL_VPS_RESUME_NORMAL - ;; - mov pr=r23,-2 - br.sptk.many kvm_vps_entry -END(kvm_vps_resume_normal) - -/* - * Inputs: - * r23 : pr - * r24 : guest b0 - * r25 : vpd - * r17 : isr - */ -GLOBAL_ENTRY(kvm_vps_resume_handler) - movl r30 = PAL_VPS_RESUME_HANDLER - ;; - ld8 r26=[r25] - shr r17=r17,IA64_ISR_IR_BIT - ;; - dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE - mov pr=r23,-2 - br.sptk.many kvm_vps_entry -END(kvm_vps_resume_handler) - -//mov r1=ar3 -GLOBAL_ENTRY(kvm_asm_mov_from_ar) -#ifndef ACCE_MOV_FROM_AR - br.many kvm_virtualization_fault_back -#endif - add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 - add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 - extr.u r17=r25,6,7 - ;; - ld8 r18=[r18] - mov r19=ar.itc - mov r24=b0 - ;; - add r19=r19,r18 - addl r20=@gprel(asm_mov_to_reg),gp - ;; - st8 [r16] = r19 - adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 - shladd r17=r17,4,r20 - ;; - mov b0=r17 - br.sptk.few b0 - ;; -END(kvm_asm_mov_from_ar) - -/* - * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC - * clock as it's source for emulating the ITC. This version will be - * copied on top of the original version if the host is determined to - * be an SN2. 
- */ -GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2) - add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 - movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT)) - - add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 - extr.u r17=r25,6,7 - mov r24=b0 - ;; - ld8 r18=[r18] - ld8 r19=[r19] - addl r20=@gprel(asm_mov_to_reg),gp - ;; - add r19=r19,r18 - shladd r17=r17,4,r20 - ;; - adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 - st8 [r16] = r19 - mov b0=r17 - br.sptk.few b0 - ;; -END(kvm_asm_mov_from_ar_sn2) - - - -// mov r1=rr[r3] -GLOBAL_ENTRY(kvm_asm_mov_from_rr) -#ifndef ACCE_MOV_FROM_RR - br.many kvm_virtualization_fault_back -#endif - extr.u r16=r25,20,7 - extr.u r17=r25,6,7 - addl r20=@gprel(asm_mov_from_reg),gp - ;; - adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20 - shladd r16=r16,4,r20 - mov r24=b0 - ;; - add r27=VMM_VCPU_VRR0_OFFSET,r21 - mov b0=r16 - br.many b0 - ;; -kvm_asm_mov_from_rr_back_1: - adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 - adds r22=asm_mov_to_reg-asm_mov_from_reg,r20 - shr.u r26=r19,61 - ;; - shladd r17=r17,4,r22 - shladd r27=r26,3,r27 - ;; - ld8 r19=[r27] - mov b0=r17 - br.many b0 -END(kvm_asm_mov_from_rr) - - -// mov rr[r3]=r2 -GLOBAL_ENTRY(kvm_asm_mov_to_rr) -#ifndef ACCE_MOV_TO_RR - br.many kvm_virtualization_fault_back -#endif - extr.u r16=r25,20,7 - extr.u r17=r25,13,7 - addl r20=@gprel(asm_mov_from_reg),gp - ;; - adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20 - shladd r16=r16,4,r20 - mov r22=b0 - ;; - add r27=VMM_VCPU_VRR0_OFFSET,r21 - mov b0=r16 - br.many b0 - ;; -kvm_asm_mov_to_rr_back_1: - adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20 - shr.u r23=r19,61 - shladd r17=r17,4,r20 - ;; - //if rr6, go back - cmp.eq p6,p0=6,r23 - mov b0=r22 - (p6) br.cond.dpnt.many kvm_virtualization_fault_back - ;; - mov r28=r19 - mov b0=r17 - br.many b0 -kvm_asm_mov_to_rr_back_2: - adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 - shladd r27=r23,3,r27 - ;; // vrr.rid<<4 |0xe - st8 [r27]=r19 - mov b0=r30 - ;; - extr.u r16=r19,8,26 - extr.u r18 =r19,2,6 - mov r17 =0xe - ;; - shladd r16 = r16, 4, r17 - extr.u r19 =r19,0,8 - ;; - shl r16 = r16,8 - ;; - add r19 = r19, r16 - ;; //set ve 1 - dep r19=-1,r19,0,1 - cmp.lt p6,p0=14,r18 - ;; - (p6) mov r18=14 - ;; - (p6) dep r19=r18,r19,2,6 - ;; - cmp.eq p6,p0=0,r23 - ;; - cmp.eq.or p6,p0=4,r23 - ;; - adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21 - (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 - ;; - ld4 r16=[r16] - cmp.eq p7,p0=r0,r0 - (p6) shladd r17=r23,1,r17 - ;; - (p6) st8 [r17]=r19 - (p6) tbit.nz p6,p7=r16,0 - ;; - (p7) mov rr[r28]=r19 - mov r24=r22 - br.many b0 -END(kvm_asm_mov_to_rr) - - -//rsm -GLOBAL_ENTRY(kvm_asm_rsm) -#ifndef ACCE_RSM - br.many kvm_virtualization_fault_back -#endif - VMX_VPS_SYNC_READ - ;; - extr.u r26=r25,6,21 - extr.u r27=r25,31,2 - ;; - extr.u r28=r25,36,1 - dep r26=r27,r26,21,2 - ;; - add r17=VPD_VPSR_START_OFFSET,r16 - add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 - //r26 is imm24 - dep r26=r28,r26,23,1 - ;; - ld8 r18=[r17] - movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI - ld4 r23=[r22] - sub r27=-1,r26 - mov r24=b0 - ;; - mov r20=cr.ipsr - or r28=r27,r28 - and r19=r18,r27 - ;; - st8 [r17]=r19 - and r20=r20,r28 - /* Comment it out due to short of fp lazy alorgithm support - adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 - ;; - ld8 r27=[r27] - ;; - tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT - ;; - (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 - */ - ;; - mov cr.ipsr=r20 - tbit.nz p6,p0=r23,0 - ;; - tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT - (p6) br.dptk kvm_resume_to_guest_with_sync - ;; - add r26=VMM_VCPU_META_RR0_OFFSET,r21 - add 
r27=VMM_VCPU_META_RR0_OFFSET+8,r21 - dep r23=-1,r23,0,1 - ;; - ld8 r26=[r26] - ld8 r27=[r27] - st4 [r22]=r23 - dep.z r28=4,61,3 - ;; - mov rr[r0]=r26 - ;; - mov rr[r28]=r27 - ;; - srlz.d - br.many kvm_resume_to_guest_with_sync -END(kvm_asm_rsm) - - -//ssm -GLOBAL_ENTRY(kvm_asm_ssm) -#ifndef ACCE_SSM - br.many kvm_virtualization_fault_back -#endif - VMX_VPS_SYNC_READ - ;; - extr.u r26=r25,6,21 - extr.u r27=r25,31,2 - ;; - extr.u r28=r25,36,1 - dep r26=r27,r26,21,2 - ;; //r26 is imm24 - add r27=VPD_VPSR_START_OFFSET,r16 - dep r26=r28,r26,23,1 - ;; //r19 vpsr - ld8 r29=[r27] - mov r24=b0 - ;; - add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 - mov r20=cr.ipsr - or r19=r29,r26 - ;; - ld4 r23=[r22] - st8 [r27]=r19 - or r20=r20,r26 - ;; - mov cr.ipsr=r20 - movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT - ;; - and r19=r28,r19 - tbit.z p6,p0=r23,0 - ;; - cmp.ne.or p6,p0=r28,r19 - (p6) br.dptk kvm_asm_ssm_1 - ;; - add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 - add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 - dep r23=0,r23,0,1 - ;; - ld8 r26=[r26] - ld8 r27=[r27] - st4 [r22]=r23 - dep.z r28=4,61,3 - ;; - mov rr[r0]=r26 - ;; - mov rr[r28]=r27 - ;; - srlz.d - ;; -kvm_asm_ssm_1: - tbit.nz p6,p0=r29,IA64_PSR_I_BIT - ;; - tbit.z.or p6,p0=r19,IA64_PSR_I_BIT - (p6) br.dptk kvm_resume_to_guest_with_sync - ;; - add r29=VPD_VTPR_START_OFFSET,r16 - add r30=VPD_VHPI_START_OFFSET,r16 - ;; - ld8 r29=[r29] - ld8 r30=[r30] - ;; - extr.u r17=r29,4,4 - extr.u r18=r29,16,1 - ;; - dep r17=r18,r17,4,1 - ;; - cmp.gt p6,p0=r30,r17 - (p6) br.dpnt.few kvm_asm_dispatch_vexirq - br.many kvm_resume_to_guest_with_sync -END(kvm_asm_ssm) - - -//mov psr.l=r2 -GLOBAL_ENTRY(kvm_asm_mov_to_psr) -#ifndef ACCE_MOV_TO_PSR - br.many kvm_virtualization_fault_back -#endif - VMX_VPS_SYNC_READ - ;; - extr.u r26=r25,13,7 //r2 - addl r20=@gprel(asm_mov_from_reg),gp - ;; - adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20 - shladd r26=r26,4,r20 - mov r24=b0 - ;; - add r27=VPD_VPSR_START_OFFSET,r16 - mov b0=r26 - br.many b0 - ;; -kvm_asm_mov_to_psr_back: - ld8 r17=[r27] - add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21 - dep r19=0,r19,32,32 - ;; - ld4 r23=[r22] - dep r18=0,r17,0,32 - ;; - add r30=r18,r19 - movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT - ;; - st8 [r27]=r30 - and r27=r28,r30 - and r29=r28,r17 - ;; - cmp.eq p5,p0=r29,r27 - cmp.eq p6,p7=r28,r27 - (p5) br.many kvm_asm_mov_to_psr_1 - ;; - //virtual to physical - (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21 - (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 - (p7) dep r23=-1,r23,0,1 - ;; - //physical to virtual - (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21 - (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21 - (p6) dep r23=0,r23,0,1 - ;; - ld8 r26=[r26] - ld8 r27=[r27] - st4 [r22]=r23 - dep.z r28=4,61,3 - ;; - mov rr[r0]=r26 - ;; - mov rr[r28]=r27 - ;; - srlz.d - ;; -kvm_asm_mov_to_psr_1: - mov r20=cr.ipsr - movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT - ;; - or r19=r19,r28 - dep r20=0,r20,0,32 - ;; - add r20=r19,r20 - mov b0=r24 - ;; - /* Comment it out due to short of fp lazy algorithm support - adds r27=IA64_VCPU_FP_PSR_OFFSET,r21 - ;; - ld8 r27=[r27] - ;; - tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT - ;; - (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 - ;; - */ - mov cr.ipsr=r20 - cmp.ne p6,p0=r0,r0 - ;; - tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT - tbit.z.or p6,p0=r30,IA64_PSR_I_BIT - (p6) br.dpnt.few kvm_resume_to_guest_with_sync - ;; - add r29=VPD_VTPR_START_OFFSET,r16 - add r30=VPD_VHPI_START_OFFSET,r16 - ;; - ld8 r29=[r29] - ld8 r30=[r30] - ;; - extr.u r17=r29,4,4 - extr.u r18=r29,16,1 - ;; - dep 
r17=r18,r17,4,1 - ;; - cmp.gt p6,p0=r30,r17 - (p6) br.dpnt.few kvm_asm_dispatch_vexirq - br.many kvm_resume_to_guest_with_sync -END(kvm_asm_mov_to_psr) - - -ENTRY(kvm_asm_dispatch_vexirq) -//increment iip - mov r17 = b0 - mov r18 = r31 -{.mii - add r25=VMM_VPD_BASE_OFFSET,r21 - nop 0x0 - mov r24 = ip - ;; -} -{.mmb - add r24 = 0x20, r24 - ld8 r25 = [r25] - br.sptk.many kvm_vps_sync_write -} - mov b0 =r17 - mov r16=cr.ipsr - mov r31 = r18 - mov r19 = 37 - ;; - extr.u r17=r16,IA64_PSR_RI_BIT,2 - tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 - ;; - (p6) mov r18=cr.iip - (p6) mov r17=r0 - (p7) add r17=1,r17 - ;; - (p6) add r18=0x10,r18 - dep r16=r17,r16,IA64_PSR_RI_BIT,2 - ;; - (p6) mov cr.iip=r18 - mov cr.ipsr=r16 - mov r30 =1 - br.many kvm_dispatch_vexirq -END(kvm_asm_dispatch_vexirq) - -// thash -// TODO: add support when pta.vf = 1 -GLOBAL_ENTRY(kvm_asm_thash) -#ifndef ACCE_THASH - br.many kvm_virtualization_fault_back -#endif - extr.u r17=r25,20,7 // get r3 from opcode in r25 - extr.u r18=r25,6,7 // get r1 from opcode in r25 - addl r20=@gprel(asm_mov_from_reg),gp - ;; - adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20 - shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17) - adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs - ;; - mov r24=b0 - ;; - ld8 r16=[r16] // get VPD addr - mov b0=r17 - br.many b0 // r19 return value - ;; -kvm_asm_thash_back1: - shr.u r23=r19,61 // get RR number - adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr - adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta - ;; - shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr - ld8 r17=[r16] // get PTA - mov r26=1 - ;; - extr.u r29=r17,2,6 // get pta.size - ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value - ;; - mov b0=r24 - //Fallback to C if pta.vf is set - tbit.nz p6,p0=r17, 8 - ;; - (p6) mov r24=EVENT_THASH - (p6) br.cond.dpnt.many kvm_virtualization_fault_back - extr.u r28=r28,2,6 // get rr.ps - shl r22=r26,r29 // 1UL << pta.size - ;; - shr.u r23=r19,r28 // vaddr >> rr.ps - adds r26=3,r29 // pta.size + 3 - shl r27=r17,3 // pta << 3 - ;; - shl r23=r23,3 // (vaddr >> rr.ps) << 3 - shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) - movl r16=7<<61 - ;; - adds r22=-1,r22 // (1UL << pta.size) - 1 - shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size - and r19=r19,r16 // vaddr & VRN_MASK - ;; - and r22=r22,r23 // vhpt_offset - or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size) - adds r26=asm_mov_to_reg-asm_mov_from_reg,r20 - ;; - or r19=r19,r22 // calc pval - shladd r17=r18,4,r26 - adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20 - ;; - mov b0=r17 - br.many b0 -END(kvm_asm_thash) - -#define MOV_TO_REG0 \ -{; \ - nop.b 0x0; \ - nop.b 0x0; \ - nop.b 0x0; \ - ;; \ -}; - - -#define MOV_TO_REG(n) \ -{; \ - mov r##n##=r19; \ - mov b0=r30; \ - br.sptk.many b0; \ - ;; \ -}; - - -#define MOV_FROM_REG(n) \ -{; \ - mov r19=r##n##; \ - mov b0=r30; \ - br.sptk.many b0; \ - ;; \ -}; - - -#define MOV_TO_BANK0_REG(n) \ -ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \ -{; \ - mov r26=r2; \ - mov r2=r19; \ - bsw.1; \ - ;; \ -}; \ -{; \ - mov r##n##=r2; \ - nop.b 0x0; \ - bsw.0; \ - ;; \ -}; \ -{; \ - mov r2=r26; \ - mov b0=r30; \ - br.sptk.many b0; \ - ;; \ -}; \ -END(asm_mov_to_bank0_reg##n##) - - -#define MOV_FROM_BANK0_REG(n) \ -ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \ -{; \ - mov r26=r2; \ - nop.b 0x0; \ - bsw.1; \ - ;; \ -}; \ -{; \ - mov r2=r##n##; \ - nop.b 0x0; \ - bsw.0; \ - ;; \ -}; \ -{; \ - mov r19=r2; \ - mov r2=r26; \ - mov b0=r30; \ -}; \ -{; \ - nop.b 0x0; \ - nop.b 0x0; \ - 
br.sptk.many b0; \ - ;; \ -}; \ -END(asm_mov_from_bank0_reg##n##) - - -#define JMP_TO_MOV_TO_BANK0_REG(n) \ -{; \ - nop.b 0x0; \ - nop.b 0x0; \ - br.sptk.many asm_mov_to_bank0_reg##n##; \ - ;; \ -} - - -#define JMP_TO_MOV_FROM_BANK0_REG(n) \ -{; \ - nop.b 0x0; \ - nop.b 0x0; \ - br.sptk.many asm_mov_from_bank0_reg##n##; \ - ;; \ -} - - -MOV_FROM_BANK0_REG(16) -MOV_FROM_BANK0_REG(17) -MOV_FROM_BANK0_REG(18) -MOV_FROM_BANK0_REG(19) -MOV_FROM_BANK0_REG(20) -MOV_FROM_BANK0_REG(21) -MOV_FROM_BANK0_REG(22) -MOV_FROM_BANK0_REG(23) -MOV_FROM_BANK0_REG(24) -MOV_FROM_BANK0_REG(25) -MOV_FROM_BANK0_REG(26) -MOV_FROM_BANK0_REG(27) -MOV_FROM_BANK0_REG(28) -MOV_FROM_BANK0_REG(29) -MOV_FROM_BANK0_REG(30) -MOV_FROM_BANK0_REG(31) - - -// mov from reg table -ENTRY(asm_mov_from_reg) - MOV_FROM_REG(0) - MOV_FROM_REG(1) - MOV_FROM_REG(2) - MOV_FROM_REG(3) - MOV_FROM_REG(4) - MOV_FROM_REG(5) - MOV_FROM_REG(6) - MOV_FROM_REG(7) - MOV_FROM_REG(8) - MOV_FROM_REG(9) - MOV_FROM_REG(10) - MOV_FROM_REG(11) - MOV_FROM_REG(12) - MOV_FROM_REG(13) - MOV_FROM_REG(14) - MOV_FROM_REG(15) - JMP_TO_MOV_FROM_BANK0_REG(16) - JMP_TO_MOV_FROM_BANK0_REG(17) - JMP_TO_MOV_FROM_BANK0_REG(18) - JMP_TO_MOV_FROM_BANK0_REG(19) - JMP_TO_MOV_FROM_BANK0_REG(20) - JMP_TO_MOV_FROM_BANK0_REG(21) - JMP_TO_MOV_FROM_BANK0_REG(22) - JMP_TO_MOV_FROM_BANK0_REG(23) - JMP_TO_MOV_FROM_BANK0_REG(24) - JMP_TO_MOV_FROM_BANK0_REG(25) - JMP_TO_MOV_FROM_BANK0_REG(26) - JMP_TO_MOV_FROM_BANK0_REG(27) - JMP_TO_MOV_FROM_BANK0_REG(28) - JMP_TO_MOV_FROM_BANK0_REG(29) - JMP_TO_MOV_FROM_BANK0_REG(30) - JMP_TO_MOV_FROM_BANK0_REG(31) - MOV_FROM_REG(32) - MOV_FROM_REG(33) - MOV_FROM_REG(34) - MOV_FROM_REG(35) - MOV_FROM_REG(36) - MOV_FROM_REG(37) - MOV_FROM_REG(38) - MOV_FROM_REG(39) - MOV_FROM_REG(40) - MOV_FROM_REG(41) - MOV_FROM_REG(42) - MOV_FROM_REG(43) - MOV_FROM_REG(44) - MOV_FROM_REG(45) - MOV_FROM_REG(46) - MOV_FROM_REG(47) - MOV_FROM_REG(48) - MOV_FROM_REG(49) - MOV_FROM_REG(50) - MOV_FROM_REG(51) - MOV_FROM_REG(52) - MOV_FROM_REG(53) - MOV_FROM_REG(54) - MOV_FROM_REG(55) - MOV_FROM_REG(56) - MOV_FROM_REG(57) - MOV_FROM_REG(58) - MOV_FROM_REG(59) - MOV_FROM_REG(60) - MOV_FROM_REG(61) - MOV_FROM_REG(62) - MOV_FROM_REG(63) - MOV_FROM_REG(64) - MOV_FROM_REG(65) - MOV_FROM_REG(66) - MOV_FROM_REG(67) - MOV_FROM_REG(68) - MOV_FROM_REG(69) - MOV_FROM_REG(70) - MOV_FROM_REG(71) - MOV_FROM_REG(72) - MOV_FROM_REG(73) - MOV_FROM_REG(74) - MOV_FROM_REG(75) - MOV_FROM_REG(76) - MOV_FROM_REG(77) - MOV_FROM_REG(78) - MOV_FROM_REG(79) - MOV_FROM_REG(80) - MOV_FROM_REG(81) - MOV_FROM_REG(82) - MOV_FROM_REG(83) - MOV_FROM_REG(84) - MOV_FROM_REG(85) - MOV_FROM_REG(86) - MOV_FROM_REG(87) - MOV_FROM_REG(88) - MOV_FROM_REG(89) - MOV_FROM_REG(90) - MOV_FROM_REG(91) - MOV_FROM_REG(92) - MOV_FROM_REG(93) - MOV_FROM_REG(94) - MOV_FROM_REG(95) - MOV_FROM_REG(96) - MOV_FROM_REG(97) - MOV_FROM_REG(98) - MOV_FROM_REG(99) - MOV_FROM_REG(100) - MOV_FROM_REG(101) - MOV_FROM_REG(102) - MOV_FROM_REG(103) - MOV_FROM_REG(104) - MOV_FROM_REG(105) - MOV_FROM_REG(106) - MOV_FROM_REG(107) - MOV_FROM_REG(108) - MOV_FROM_REG(109) - MOV_FROM_REG(110) - MOV_FROM_REG(111) - MOV_FROM_REG(112) - MOV_FROM_REG(113) - MOV_FROM_REG(114) - MOV_FROM_REG(115) - MOV_FROM_REG(116) - MOV_FROM_REG(117) - MOV_FROM_REG(118) - MOV_FROM_REG(119) - MOV_FROM_REG(120) - MOV_FROM_REG(121) - MOV_FROM_REG(122) - MOV_FROM_REG(123) - MOV_FROM_REG(124) - MOV_FROM_REG(125) - MOV_FROM_REG(126) - MOV_FROM_REG(127) -END(asm_mov_from_reg) - - -/* must be in bank 0 - * parameter: - * r31: pr - * r24: b0 - */ 
-ENTRY(kvm_resume_to_guest_with_sync) - adds r19=VMM_VPD_BASE_OFFSET,r21 - mov r16 = r31 - mov r17 = r24 - ;; -{.mii - ld8 r25 =[r19] - nop 0x0 - mov r24 = ip - ;; -} -{.mmb - add r24 =0x20, r24 - nop 0x0 - br.sptk.many kvm_vps_sync_write -} - - mov r31 = r16 - mov r24 =r17 - ;; - br.sptk.many kvm_resume_to_guest -END(kvm_resume_to_guest_with_sync) - -ENTRY(kvm_resume_to_guest) - adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 - ;; - ld8 r1 =[r16] - adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21 - ;; - mov r16=cr.ipsr - ;; - ld8 r20 = [r20] - adds r19=VMM_VPD_BASE_OFFSET,r21 - ;; - ld8 r25=[r19] - extr.u r17=r16,IA64_PSR_RI_BIT,2 - tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 - ;; - (p6) mov r18=cr.iip - (p6) mov r17=r0 - ;; - (p6) add r18=0x10,r18 - (p7) add r17=1,r17 - ;; - (p6) mov cr.iip=r18 - dep r16=r17,r16,IA64_PSR_RI_BIT,2 - ;; - mov cr.ipsr=r16 - adds r19= VPD_VPSR_START_OFFSET,r25 - add r28=PAL_VPS_RESUME_NORMAL,r20 - add r29=PAL_VPS_RESUME_HANDLER,r20 - ;; - ld8 r19=[r19] - mov b0=r29 - mov r27=cr.isr - ;; - tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic - shr r27=r27,IA64_ISR_IR_BIT - ;; - (p6) ld8 r26=[r25] - (p7) mov b0=r28 - ;; - (p6) dep r26=r27,r26,63,1 - mov pr=r31,-2 - br.sptk.many b0 // call pal service - ;; -END(kvm_resume_to_guest) - - -MOV_TO_BANK0_REG(16) -MOV_TO_BANK0_REG(17) -MOV_TO_BANK0_REG(18) -MOV_TO_BANK0_REG(19) -MOV_TO_BANK0_REG(20) -MOV_TO_BANK0_REG(21) -MOV_TO_BANK0_REG(22) -MOV_TO_BANK0_REG(23) -MOV_TO_BANK0_REG(24) -MOV_TO_BANK0_REG(25) -MOV_TO_BANK0_REG(26) -MOV_TO_BANK0_REG(27) -MOV_TO_BANK0_REG(28) -MOV_TO_BANK0_REG(29) -MOV_TO_BANK0_REG(30) -MOV_TO_BANK0_REG(31) - - -// mov to reg table -ENTRY(asm_mov_to_reg) - MOV_TO_REG0 - MOV_TO_REG(1) - MOV_TO_REG(2) - MOV_TO_REG(3) - MOV_TO_REG(4) - MOV_TO_REG(5) - MOV_TO_REG(6) - MOV_TO_REG(7) - MOV_TO_REG(8) - MOV_TO_REG(9) - MOV_TO_REG(10) - MOV_TO_REG(11) - MOV_TO_REG(12) - MOV_TO_REG(13) - MOV_TO_REG(14) - MOV_TO_REG(15) - JMP_TO_MOV_TO_BANK0_REG(16) - JMP_TO_MOV_TO_BANK0_REG(17) - JMP_TO_MOV_TO_BANK0_REG(18) - JMP_TO_MOV_TO_BANK0_REG(19) - JMP_TO_MOV_TO_BANK0_REG(20) - JMP_TO_MOV_TO_BANK0_REG(21) - JMP_TO_MOV_TO_BANK0_REG(22) - JMP_TO_MOV_TO_BANK0_REG(23) - JMP_TO_MOV_TO_BANK0_REG(24) - JMP_TO_MOV_TO_BANK0_REG(25) - JMP_TO_MOV_TO_BANK0_REG(26) - JMP_TO_MOV_TO_BANK0_REG(27) - JMP_TO_MOV_TO_BANK0_REG(28) - JMP_TO_MOV_TO_BANK0_REG(29) - JMP_TO_MOV_TO_BANK0_REG(30) - JMP_TO_MOV_TO_BANK0_REG(31) - MOV_TO_REG(32) - MOV_TO_REG(33) - MOV_TO_REG(34) - MOV_TO_REG(35) - MOV_TO_REG(36) - MOV_TO_REG(37) - MOV_TO_REG(38) - MOV_TO_REG(39) - MOV_TO_REG(40) - MOV_TO_REG(41) - MOV_TO_REG(42) - MOV_TO_REG(43) - MOV_TO_REG(44) - MOV_TO_REG(45) - MOV_TO_REG(46) - MOV_TO_REG(47) - MOV_TO_REG(48) - MOV_TO_REG(49) - MOV_TO_REG(50) - MOV_TO_REG(51) - MOV_TO_REG(52) - MOV_TO_REG(53) - MOV_TO_REG(54) - MOV_TO_REG(55) - MOV_TO_REG(56) - MOV_TO_REG(57) - MOV_TO_REG(58) - MOV_TO_REG(59) - MOV_TO_REG(60) - MOV_TO_REG(61) - MOV_TO_REG(62) - MOV_TO_REG(63) - MOV_TO_REG(64) - MOV_TO_REG(65) - MOV_TO_REG(66) - MOV_TO_REG(67) - MOV_TO_REG(68) - MOV_TO_REG(69) - MOV_TO_REG(70) - MOV_TO_REG(71) - MOV_TO_REG(72) - MOV_TO_REG(73) - MOV_TO_REG(74) - MOV_TO_REG(75) - MOV_TO_REG(76) - MOV_TO_REG(77) - MOV_TO_REG(78) - MOV_TO_REG(79) - MOV_TO_REG(80) - MOV_TO_REG(81) - MOV_TO_REG(82) - MOV_TO_REG(83) - MOV_TO_REG(84) - MOV_TO_REG(85) - MOV_TO_REG(86) - MOV_TO_REG(87) - MOV_TO_REG(88) - MOV_TO_REG(89) - MOV_TO_REG(90) - MOV_TO_REG(91) - MOV_TO_REG(92) - MOV_TO_REG(93) - MOV_TO_REG(94) - MOV_TO_REG(95) - MOV_TO_REG(96) - MOV_TO_REG(97) - MOV_TO_REG(98) - MOV_TO_REG(99) - 
MOV_TO_REG(100) - MOV_TO_REG(101) - MOV_TO_REG(102) - MOV_TO_REG(103) - MOV_TO_REG(104) - MOV_TO_REG(105) - MOV_TO_REG(106) - MOV_TO_REG(107) - MOV_TO_REG(108) - MOV_TO_REG(109) - MOV_TO_REG(110) - MOV_TO_REG(111) - MOV_TO_REG(112) - MOV_TO_REG(113) - MOV_TO_REG(114) - MOV_TO_REG(115) - MOV_TO_REG(116) - MOV_TO_REG(117) - MOV_TO_REG(118) - MOV_TO_REG(119) - MOV_TO_REG(120) - MOV_TO_REG(121) - MOV_TO_REG(122) - MOV_TO_REG(123) - MOV_TO_REG(124) - MOV_TO_REG(125) - MOV_TO_REG(126) - MOV_TO_REG(127) -END(asm_mov_to_reg) diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c deleted file mode 100644 index b0398740b48d..000000000000 --- a/arch/ia64/kvm/process.c +++ /dev/null @@ -1,1024 +0,0 @@ -/* - * process.c: handle interruption inject for guests. - * Copyright (c) 2005, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Shaofan Li (Susue Li) <susie.li@intel.com> - * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com> - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - */ -#include "vcpu.h" - -#include <asm/pal.h> -#include <asm/sal.h> -#include <asm/fpswa.h> -#include <asm/kregs.h> -#include <asm/tlb.h> - -fpswa_interface_t *vmm_fpswa_interface; - -#define IA64_VHPT_TRANS_VECTOR 0x0000 -#define IA64_INST_TLB_VECTOR 0x0400 -#define IA64_DATA_TLB_VECTOR 0x0800 -#define IA64_ALT_INST_TLB_VECTOR 0x0c00 -#define IA64_ALT_DATA_TLB_VECTOR 0x1000 -#define IA64_DATA_NESTED_TLB_VECTOR 0x1400 -#define IA64_INST_KEY_MISS_VECTOR 0x1800 -#define IA64_DATA_KEY_MISS_VECTOR 0x1c00 -#define IA64_DIRTY_BIT_VECTOR 0x2000 -#define IA64_INST_ACCESS_BIT_VECTOR 0x2400 -#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800 -#define IA64_BREAK_VECTOR 0x2c00 -#define IA64_EXTINT_VECTOR 0x3000 -#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000 -#define IA64_KEY_PERMISSION_VECTOR 0x5100 -#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200 -#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300 -#define IA64_GENEX_VECTOR 0x5400 -#define IA64_DISABLED_FPREG_VECTOR 0x5500 -#define IA64_NAT_CONSUMPTION_VECTOR 0x5600 -#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */ -#define IA64_DEBUG_VECTOR 0x5900 -#define IA64_UNALIGNED_REF_VECTOR 0x5a00 -#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00 -#define IA64_FP_FAULT_VECTOR 0x5c00 -#define IA64_FP_TRAP_VECTOR 0x5d00 -#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00 -#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00 -#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000 - -/* SDM vol2 5.5 - IVA based interruption handling */ -#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\ - IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \ - IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT) - -#define DOMN_PAL_REQUEST 0x110000 -#define DOMN_SAL_REQUEST 0x110001 - -static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, - 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, - 0x4000, 0x4400, 0x4800, 
0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, - 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, - 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600, - 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00, - 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800, - 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00 -}; - -static void collect_interruption(struct kvm_vcpu *vcpu) -{ - u64 ipsr; - u64 vdcr; - u64 vifs; - unsigned long vpsr; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - vpsr = vcpu_get_psr(vcpu); - vcpu_bsw0(vcpu); - if (vpsr & IA64_PSR_IC) { - - /* Sync mpsr id/da/dd/ss/ed bits to vipsr - * since after guest do rfi, we still want these bits on in - * mpsr - */ - - ipsr = regs->cr_ipsr; - vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA - | IA64_PSR_DD | IA64_PSR_SS - | IA64_PSR_ED)); - vcpu_set_ipsr(vcpu, vpsr); - - /* Currently, for trap, we do not advance IIP to next - * instruction. That's because we assume caller already - * set up IIP correctly - */ - - vcpu_set_iip(vcpu , regs->cr_iip); - - /* set vifs.v to zero */ - vifs = VCPU(vcpu, ifs); - vifs &= ~IA64_IFS_V; - vcpu_set_ifs(vcpu, vifs); - - vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa)); - } - - vdcr = VCPU(vcpu, dcr); - - /* Set guest psr - * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged - * be: set to the value of dcr.be - * pp: set to the value of dcr.pp - */ - vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION; - vpsr |= (vdcr & IA64_DCR_BE); - - /* VDCR pp bit position is different from VPSR pp bit */ - if (vdcr & IA64_DCR_PP) { - vpsr |= IA64_PSR_PP; - } else { - vpsr &= ~IA64_PSR_PP; - } - - vcpu_set_psr(vcpu, vpsr); - -} - -void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec) -{ - u64 viva; - struct kvm_pt_regs *regs; - union ia64_isr pt_isr; - - regs = vcpu_regs(vcpu); - - /* clear cr.isr.ir (incomplete register frame)*/ - pt_isr.val = VMX(vcpu, cr_isr); - pt_isr.ir = 0; - VMX(vcpu, cr_isr) = pt_isr.val; - - collect_interruption(vcpu); - - viva = vcpu_get_iva(vcpu); - regs->cr_iip = viva + vec; -} - -static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) -{ - union ia64_rr rr, rr1; - - rr.val = vcpu_get_rr(vcpu, ifa); - rr1.val = 0; - rr1.ps = rr.ps; - rr1.rid = rr.rid; - return (rr1.val); -} - -/* - * Set vIFA & vITIR & vIHA, when vPSR.ic =1 - * Parameter: - * set_ifa: if true, set vIFA - * set_itir: if true, set vITIR - * set_iha: if true, set vIHA - */ -void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr, - int set_ifa, int set_itir, int set_iha) -{ - long vpsr; - u64 value; - - vpsr = VCPU(vcpu, vpsr); - /* Vol2, Table 8-1 */ - if (vpsr & IA64_PSR_IC) { - if (set_ifa) - vcpu_set_ifa(vcpu, vadr); - if (set_itir) { - value = vcpu_get_itir_on_fault(vcpu, vadr); - vcpu_set_itir(vcpu, value); - } - - if (set_iha) { - value = vcpu_thash(vcpu, vadr); - vcpu_set_iha(vcpu, value); - } - } -} - -/* - * Data TLB Fault - * @ Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR); -} - -/* - * Instruction TLB Fault - * @ Instruction TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); -} - -/* - * Data Nested TLB Fault - * @ Data Nested TLB Vector - * Refer 
to SDM Vol2 Table 5-6 & 8-1 - */ -void nested_dtlb(struct kvm_vcpu *vcpu) -{ - inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR); -} - -/* - * Alternate Data TLB Fault - * @ Alternate Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) -{ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); -} - -/* - * Data TLB Fault - * @ Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr) -{ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR); -} - -/* Deal with: - * VHPT Translation Vector - */ -static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA*/ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); -} - -/* - * VHPT Instruction Fault - * @ VHPT Translation vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - _vhpt_fault(vcpu, vadr); -} - -/* - * VHPT Data Fault - * @ VHPT Translation vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - _vhpt_fault(vcpu, vadr); -} - -/* - * Deal with: - * General Exception vector - */ -void _general_exception(struct kvm_vcpu *vcpu) -{ - inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); -} - -/* - * Illegal Operation Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void illegal_op(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Illegal Dependency Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void illegal_dep(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Reserved Register/Field Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void rsv_reg_field(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} -/* - * Privileged Operation Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ - -void privilege_op(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Unimplement Data Address Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void unimpl_daddr(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Privileged Register Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void privilege_reg(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* Deal with - * Nat consumption vector - * Parameter: - * vaddr: Optional, if t == REGISTER - */ -static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr, - enum tlb_miss_type t) -{ - /* If vPSR.ic && t == DATA/INST, IFA */ - if (t == DATA || t == INSTRUCTION) { - /* IFA */ - set_ifa_itir_iha(vcpu, vadr, 1, 0, 0); - } - - inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR); -} - -/* - * Instruction Nat Page Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) -{ - _nat_consumption_fault(vcpu, vadr, INSTRUCTION); -} - -/* - * Register Nat Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void rnat_consumption(struct kvm_vcpu *vcpu) -{ - _nat_consumption_fault(vcpu, 0, REGISTER); -} - -/* - * Data Nat Page Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void 
dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) -{ - _nat_consumption_fault(vcpu, vadr, DATA); -} - -/* Deal with - * Page not present vector - */ -static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); -} - -void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - __page_not_present(vcpu, vadr); -} - -void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - __page_not_present(vcpu, vadr); -} - -/* Deal with - * Data access rights vector - */ -void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR); -} - -fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr, - unsigned long *fpsr, unsigned long *isr, unsigned long *pr, - unsigned long *ifs, struct kvm_pt_regs *regs) -{ - fp_state_t fp_state; - fpswa_ret_t ret; - struct kvm_vcpu *vcpu = current_vcpu; - - uint64_t old_rr7 = ia64_get_rr(7UL<<61); - - if (!vmm_fpswa_interface) - return (fpswa_ret_t) {-1, 0, 0, 0}; - - memset(&fp_state, 0, sizeof(fp_state_t)); - - /* - * compute fp_state. only FP registers f6 - f11 are used by the - * vmm, so set those bits in the mask and set the low volatile - * pointer to point to these registers. - */ - fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ - - fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) ®s->f6; - - /* - * unsigned long (*EFI_FPSWA) ( - * unsigned long trap_type, - * void *Bundle, - * unsigned long *pipsr, - * unsigned long *pfsr, - * unsigned long *pisr, - * unsigned long *ppreds, - * unsigned long *pifs, - * void *fp_state); - */ - /*Call host fpswa interface directly to virtualize - *guest fpswa request! - */ - ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]); - ia64_srlz_d(); - - ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle, - ipsr, fpsr, isr, pr, ifs, &fp_state); - ia64_set_rr(7UL << 61, old_rr7); - ia64_srlz_d(); - return ret; -} - -/* - * Handle floating-point assist faults and traps for domain. - */ -unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs, - unsigned long isr) -{ - struct kvm_vcpu *v = current_vcpu; - IA64_BUNDLE bundle; - unsigned long fault_ip; - fpswa_ret_t ret; - - fault_ip = regs->cr_iip; - /* - * When the FP trap occurs, the trapping instruction is completed. - * If ipsr.ri == 0, there is the trapping instruction in previous - * bundle. 
- */ - if (!fp_fault && (ia64_psr(regs)->ri == 0)) - fault_ip -= 16; - - if (fetch_code(v, fault_ip, &bundle)) - return -EAGAIN; - - if (!bundle.i64[0] && !bundle.i64[1]) - return -EACCES; - - ret = vmm_fp_emulate(fp_fault, &bundle, ®s->cr_ipsr, ®s->ar_fpsr, - &isr, ®s->pr, ®s->cr_ifs, regs); - return ret.status; -} - -void reflect_interruption(u64 ifa, u64 isr, u64 iim, - u64 vec, struct kvm_pt_regs *regs) -{ - u64 vector; - int status ; - struct kvm_vcpu *vcpu = current_vcpu; - u64 vpsr = VCPU(vcpu, vpsr); - - vector = vec2off[vec]; - - if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { - panic_vm(vcpu, "Interruption with vector :0x%lx occurs " - "with psr.ic = 0\n", vector); - return; - } - - switch (vec) { - case 32: /*IA64_FP_FAULT_VECTOR*/ - status = vmm_handle_fpu_swa(1, regs, isr); - if (!status) { - vcpu_increment_iip(vcpu); - return; - } else if (-EAGAIN == status) - return; - break; - case 33: /*IA64_FP_TRAP_VECTOR*/ - status = vmm_handle_fpu_swa(0, regs, isr); - if (!status) - return ; - break; - } - - VCPU(vcpu, isr) = isr; - VCPU(vcpu, iipa) = regs->cr_iip; - if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) - VCPU(vcpu, iim) = iim; - else - set_ifa_itir_iha(vcpu, ifa, 1, 1, 1); - - inject_guest_interruption(vcpu, vector); -} - -static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, - unsigned long arg) -{ - struct thash_data *data; - unsigned long gpa, poff; - - if (!is_physical_mode(vcpu)) { - /* Depends on caller to provide the DTR or DTC mapping.*/ - data = vtlb_lookup(vcpu, arg, D_TLB); - if (data) - gpa = data->page_flags & _PAGE_PPN_MASK; - else { - data = vhpt_lookup(arg); - if (!data) - return 0; - gpa = data->gpaddr & _PAGE_PPN_MASK; - } - - poff = arg & (PSIZE(data->ps) - 1); - arg = PAGEALIGN(gpa, data->ps) | poff; - } - arg = kvm_gpa_to_mpa(arg << 1 >> 1); - - return (unsigned long)__va(arg); -} - -static void set_pal_call_data(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - unsigned long gr28 = vcpu_get_gr(vcpu, 28); - unsigned long gr29 = vcpu_get_gr(vcpu, 29); - unsigned long gr30 = vcpu_get_gr(vcpu, 30); - - /*FIXME:For static and stacked convention, firmware - * has put the parameters in gr28-gr31 before - * break to vmm !!*/ - - switch (gr28) { - case PAL_PERF_MON_INFO: - case PAL_HALT_INFO: - p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); - p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); - break; - case PAL_BRAND_INFO: - p->u.pal_data.gr29 = gr29; - p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); - break; - default: - p->u.pal_data.gr29 = gr29; - p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); - } - p->u.pal_data.gr28 = gr28; - p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); - - p->exit_reason = EXIT_REASON_PAL_CALL; -} - -static void get_pal_call_result(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0); - vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0); - vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); - vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); - } else - panic_vm(vcpu, "Mis-set for exit reason!\n"); -} - -static void set_sal_call_data(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32); - p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33); - p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34); - p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35); - p->u.sal_data.in4 = vcpu_get_gr(vcpu, 
36); - p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37); - p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38); - p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39); - p->exit_reason = EXIT_REASON_SAL_CALL; -} - -static void get_sal_call_result(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0); - vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0); - vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); - vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); - } else - panic_vm(vcpu, "Mis-set for exit reason!\n"); -} - -void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, - unsigned long isr, unsigned long iim) -{ - struct kvm_vcpu *v = current_vcpu; - long psr; - - if (ia64_psr(regs)->cpl == 0) { - /* Allow hypercalls only when cpl = 0. */ - if (iim == DOMN_PAL_REQUEST) { - local_irq_save(psr); - set_pal_call_data(v); - vmm_transition(v); - get_pal_call_result(v); - vcpu_increment_iip(v); - local_irq_restore(psr); - return; - } else if (iim == DOMN_SAL_REQUEST) { - local_irq_save(psr); - set_sal_call_data(v); - vmm_transition(v); - get_sal_call_result(v); - vcpu_increment_iip(v); - local_irq_restore(psr); - return; - } - } - reflect_interruption(ifa, isr, iim, 11, regs); -} - -void check_pending_irq(struct kvm_vcpu *vcpu) -{ - int mask, h_pending, h_inservice; - u64 isr; - unsigned long vpsr; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - h_pending = highest_pending_irq(vcpu); - if (h_pending == NULL_VECTOR) { - update_vhpi(vcpu, NULL_VECTOR); - return; - } - h_inservice = highest_inservice_irq(vcpu); - - vpsr = VCPU(vcpu, vpsr); - mask = irq_masked(vcpu, h_pending, h_inservice); - if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) { - isr = vpsr & IA64_PSR_RI; - update_vhpi(vcpu, h_pending); - reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ - } else if (mask == IRQ_MASKED_BY_INSVC) { - if (VCPU(vcpu, vhpi)) - update_vhpi(vcpu, NULL_VECTOR); - } else { - /* masked by vpsr.i or vtpr.*/ - update_vhpi(vcpu, h_pending); - } -} - -static void generate_exirq(struct kvm_vcpu *vcpu) -{ - unsigned vpsr; - uint64_t isr; - - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - vpsr = VCPU(vcpu, vpsr); - isr = vpsr & IA64_PSR_RI; - if (!(vpsr & IA64_PSR_IC)) - panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); - reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ -} - -void vhpi_detection(struct kvm_vcpu *vcpu) -{ - uint64_t threshold, vhpi; - union ia64_tpr vtpr; - struct ia64_psr vpsr; - - vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - vtpr.val = VCPU(vcpu, tpr); - - threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; - vhpi = VCPU(vcpu, vhpi); - if (vhpi > threshold) { - /* interrupt actived*/ - generate_exirq(vcpu); - } -} - -void leave_hypervisor_tail(void) -{ - struct kvm_vcpu *v = current_vcpu; - - if (VMX(v, timer_check)) { - VMX(v, timer_check) = 0; - if (VMX(v, itc_check)) { - if (vcpu_get_itc(v) > VCPU(v, itm)) { - if (!(VCPU(v, itv) & (1 << 16))) { - vcpu_pend_interrupt(v, VCPU(v, itv) - & 0xff); - VMX(v, itc_check) = 0; - } else { - v->arch.timer_pending = 1; - } - VMX(v, last_itc) = VCPU(v, itm) + 1; - } - } - } - - rmb(); - if (v->arch.irq_new_pending) { - v->arch.irq_new_pending = 0; - VMX(v, irq_check) = 0; - check_pending_irq(v); - return; - } - if (VMX(v, irq_check)) { - VMX(v, irq_check) = 0; - vhpi_detection(v); - } -} - -static inline void handle_lds(struct kvm_pt_regs *regs) -{ - regs->cr_ipsr |= IA64_PSR_ED; -} - -void physical_tlb_miss(struct 
kvm_vcpu *vcpu, unsigned long vadr, int type) -{ - unsigned long pte; - union ia64_rr rr; - - rr.val = ia64_get_rr(vadr); - pte = vadr & _PAGE_PPN_MASK; - pte = pte | PHY_PAGE_WB; - thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type); - return; -} - -void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs) -{ - unsigned long vpsr; - int type; - - u64 vhpt_adr, gppa, pteval, rr, itir; - union ia64_isr misr; - union ia64_pta vpta; - struct thash_data *data; - struct kvm_vcpu *v = current_vcpu; - - vpsr = VCPU(v, vpsr); - misr.val = VMX(v, cr_isr); - - type = vec; - - if (is_physical_mode(v) && (!(vadr << 1 >> 62))) { - if (vec == 2) { - if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) { - emulate_io_inst(v, ((vadr << 1) >> 1), 4); - return; - } - } - physical_tlb_miss(v, vadr, type); - return; - } - data = vtlb_lookup(v, vadr, type); - if (data != 0) { - if (type == D_TLB) { - gppa = (vadr & ((1UL << data->ps) - 1)) - + (data->ppn >> (data->ps - 12) << data->ps); - if (__gpfn_is_io(gppa >> PAGE_SHIFT)) { - if (data->pl >= ((regs->cr_ipsr >> - IA64_PSR_CPL0_BIT) & 3)) - emulate_io_inst(v, gppa, data->ma); - else { - vcpu_set_isr(v, misr.val); - data_access_rights(v, vadr); - } - return ; - } - } - thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type); - - } else if (type == D_TLB) { - if (misr.sp) { - handle_lds(regs); - return; - } - - rr = vcpu_get_rr(v, vadr); - itir = rr & (RR_RID_MASK | RR_PS_MASK); - - if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) { - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - alt_dtlb(v, vadr); - } else { - nested_dtlb(v); - } - return ; - } - - vpta.val = vcpu_get_pta(v); - /* avoid recursively walking (short format) VHPT */ - - vhpt_adr = vcpu_thash(v, vadr); - if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { - /* VHPT successfully read. */ - if (!(pteval & _PAGE_P)) { - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dtlb_fault(v, vadr); - } else { - nested_dtlb(v); - } - } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { - thash_purge_and_insert(v, pteval, itir, - vadr, D_TLB); - } else if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dtlb_fault(v, vadr); - } else { - nested_dtlb(v); - } - } else { - /* Can't read VHPT. */ - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dvhpt_fault(v, vadr); - } else { - nested_dtlb(v); - } - } - } else if (type == I_TLB) { - if (!(vpsr & IA64_PSR_IC)) - misr.ni = 1; - if (!vhpt_enabled(v, vadr, INST_REF)) { - vcpu_set_isr(v, misr.val); - alt_itlb(v, vadr); - return; - } - - vpta.val = vcpu_get_pta(v); - - vhpt_adr = vcpu_thash(v, vadr); - if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { - /* VHPT successfully read. 
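An illustrative sketch, not code from the removed file: the D-TLB branch of kvm_page_fault() above rebuilds the guest physical address from the vtlb entry before asking __gpfn_is_io() whether the access hits emulated I/O. The helper below performs the same arithmetic in isolation; it assumes, as that code does, that ppn is kept in 4 KiB frame units and ps is the log2 page size (>= 12).

#include <stdint.h>

/* Guest physical address for a virtual address covered by a TLB entry:
 * frame base scaled to the mapping's page size, plus the in-page offset. */
static uint64_t guest_phys_addr(uint64_t vadr, uint64_t ppn, unsigned ps)
{
        uint64_t off  = vadr & ((1UL << ps) - 1);
        uint64_t base = (ppn >> (ps - 12)) << ps;   /* ppn counts 4 KiB frames */

        return base + off;
}

For a 16 KiB mapping (ps = 14) this drops the low two bits of ppn before shifting, which matches the data->ppn >> (data->ps - 12) << data->ps expression in the fault path above.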
*/ - if (pteval & _PAGE_P) { - if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) { - vcpu_set_isr(v, misr.val); - itlb_fault(v, vadr); - return ; - } - rr = vcpu_get_rr(v, vadr); - itir = rr & (RR_RID_MASK | RR_PS_MASK); - thash_purge_and_insert(v, pteval, itir, - vadr, I_TLB); - } else { - vcpu_set_isr(v, misr.val); - inst_page_not_present(v, vadr); - } - } else { - vcpu_set_isr(v, misr.val); - ivhpt_fault(v, vadr); - } - } -} - -void kvm_vexirq(struct kvm_vcpu *vcpu) -{ - u64 vpsr, isr; - struct kvm_pt_regs *regs; - - regs = vcpu_regs(vcpu); - vpsr = VCPU(vcpu, vpsr); - isr = vpsr & IA64_PSR_RI; - reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/ -} - -void kvm_ia64_handle_irq(struct kvm_vcpu *v) -{ - struct exit_ctl_data *p = &v->arch.exit_data; - long psr; - - local_irq_save(psr); - p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; - vmm_transition(v); - local_irq_restore(psr); - - VMX(v, timer_check) = 1; - -} - -static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos) -{ - u64 oldrid, moldrid, oldpsbits, vaddr; - struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos]; - vaddr = p->vaddr; - - oldrid = VMX(v, vrr[0]); - VMX(v, vrr[0]) = p->rr; - oldpsbits = VMX(v, psbits[0]); - VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]); - moldrid = ia64_get_rr(0x0); - ia64_set_rr(0x0, vrrtomrr(p->rr)); - ia64_srlz_d(); - - vaddr = PAGEALIGN(vaddr, p->ps); - thash_purge_entries_remote(v, vaddr, p->ps); - - VMX(v, vrr[0]) = oldrid; - VMX(v, psbits[0]) = oldpsbits; - ia64_set_rr(0x0, moldrid); - ia64_dv_serialize_data(); -} - -static void vcpu_do_resume(struct kvm_vcpu *vcpu) -{ - /*Re-init VHPT and VTLB once from resume*/ - vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES; - thash_init(&vcpu->arch.vhpt, VHPT_SHIFT); - vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES; - thash_init(&vcpu->arch.vtlb, VTLB_SHIFT); - - ia64_set_pta(vcpu->arch.vhpt.pta.val); -} - -static void vmm_sanity_check(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { - panic_vm(vcpu, "Failed to do vmm sanity check," - "it maybe caused by crashed vmm!!\n\n"); - } -} - -static void kvm_do_resume_op(struct kvm_vcpu *vcpu) -{ - vmm_sanity_check(vcpu); /*Guarantee vcpu running on healthy vmm!*/ - - if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { - vcpu_do_resume(vcpu); - return; - } - - if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) { - thash_purge_all(vcpu); - return; - } - - if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) { - while (vcpu->arch.ptc_g_count > 0) - ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count); - } -} - -void vmm_transition(struct kvm_vcpu *vcpu) -{ - ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd, - 1, 0, 0, 0, 0, 0); - vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host); - ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd, - 1, 0, 0, 0, 0, 0); - kvm_do_resume_op(vcpu); -} - -void vmm_panic_handler(u64 vec) -{ - struct kvm_vcpu *vcpu = current_vcpu; - vmm_sanity = 0; - panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", - vec2off[vec]); -} diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S deleted file mode 100644 index 30897d44d61e..000000000000 --- a/arch/ia64/kvm/trampoline.S +++ /dev/null @@ -1,1038 +0,0 @@ -/* Save all processor states - * - * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com> - * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com> - */ - -#include <asm/asmmacro.h> -#include "asm-offsets.h" - - -#define CTX(name) 
VMM_CTX_##name##_OFFSET - - /* - * r32: context_t base address - */ -#define SAVE_BRANCH_REGS \ - add r2 = CTX(B0),r32; \ - add r3 = CTX(B1),r32; \ - mov r16 = b0; \ - mov r17 = b1; \ - ;; \ - st8 [r2]=r16,16; \ - st8 [r3]=r17,16; \ - ;; \ - mov r16 = b2; \ - mov r17 = b3; \ - ;; \ - st8 [r2]=r16,16; \ - st8 [r3]=r17,16; \ - ;; \ - mov r16 = b4; \ - mov r17 = b5; \ - ;; \ - st8 [r2]=r16; \ - st8 [r3]=r17; \ - ;; - - /* - * r33: context_t base address - */ -#define RESTORE_BRANCH_REGS \ - add r2 = CTX(B0),r33; \ - add r3 = CTX(B1),r33; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov b0 = r16; \ - mov b1 = r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov b2 = r16; \ - mov b3 = r17; \ - ;; \ - ld8 r16=[r2]; \ - ld8 r17=[r3]; \ - ;; \ - mov b4=r16; \ - mov b5=r17; \ - ;; - - - /* - * r32: context_t base address - * bsw == 1 - * Save all bank1 general registers, r4 ~ r7 - */ -#define SAVE_GENERAL_REGS \ - add r2=CTX(R4),r32; \ - add r3=CTX(R5),r32; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r4,16; \ -.mem.offset 8,0; \ - st8.spill [r3]=r5,16; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r6,48; \ -.mem.offset 8,0; \ - st8.spill [r3]=r7,48; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r12; \ -.mem.offset 8,0; \ - st8.spill [r3]=r13; \ - ;; - - /* - * r33: context_t base address - * bsw == 1 - */ -#define RESTORE_GENERAL_REGS \ - add r2=CTX(R4),r33; \ - add r3=CTX(R5),r33; \ - ;; \ - ld8.fill r4=[r2],16; \ - ld8.fill r5=[r3],16; \ - ;; \ - ld8.fill r6=[r2],48; \ - ld8.fill r7=[r3],48; \ - ;; \ - ld8.fill r12=[r2]; \ - ld8.fill r13 =[r3]; \ - ;; - - - - - /* - * r32: context_t base address - */ -#define SAVE_KERNEL_REGS \ - add r2 = CTX(KR0),r32; \ - add r3 = CTX(KR1),r32; \ - mov r16 = ar.k0; \ - mov r17 = ar.k1; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k2; \ - mov r17 = ar.k3; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k4; \ - mov r17 = ar.k5; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k6; \ - mov r17 = ar.k7; \ - ;; \ - st8 [r2] = r16; \ - st8 [r3] = r17; \ - ;; - - - - /* - * r33: context_t base address - */ -#define RESTORE_KERNEL_REGS \ - add r2 = CTX(KR0),r33; \ - add r3 = CTX(KR1),r33; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k0=r16; \ - mov ar.k1=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k2=r16; \ - mov ar.k3=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k4=r16; \ - mov ar.k5=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k6=r16; \ - mov ar.k7=r17; \ - ;; - - - - /* - * r32: context_t base address - */ -#define SAVE_APP_REGS \ - add r2 = CTX(BSPSTORE),r32; \ - mov r16 = ar.bspstore; \ - ;; \ - st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\ - mov r16 = ar.rnat; \ - ;; \ - st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \ - mov r16 = ar.fcr; \ - ;; \ - st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \ - mov r16 = ar.eflag; \ - ;; \ - st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \ - mov r16 = ar.cflg; \ - ;; \ - st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \ - mov r16 = ar.fsr; \ - ;; \ - st8 [r2] = r16,CTX(FIR)-CTX(FSR); \ - mov r16 = ar.fir; \ - ;; \ - st8 [r2] = r16,CTX(FDR)-CTX(FIR); \ - mov r16 = ar.fdr; \ - ;; \ - st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \ - mov r16 = ar.unat; \ - ;; \ - st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \ - mov r16 = ar.fpsr; \ - ;; \ - st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \ - mov r16 = ar.pfs; \ - ;; \ - st8 [r2] = r16,CTX(LC)-CTX(PFS); \ - mov r16 = ar.lc; \ - ;; \ - st8 
[r2] = r16; \ - ;; - - /* - * r33: context_t base address - */ -#define RESTORE_APP_REGS \ - add r2=CTX(BSPSTORE),r33; \ - ;; \ - ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \ - ;; \ - mov ar.bspstore=r16; \ - ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \ - ;; \ - mov ar.rnat=r16; \ - ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \ - ;; \ - mov ar.fcr=r16; \ - ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \ - ;; \ - mov ar.eflag=r16; \ - ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \ - ;; \ - mov ar.cflg=r16; \ - ld8 r16=[r2],CTX(FIR)-CTX(FSR); \ - ;; \ - mov ar.fsr=r16; \ - ld8 r16=[r2],CTX(FDR)-CTX(FIR); \ - ;; \ - mov ar.fir=r16; \ - ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \ - ;; \ - mov ar.fdr=r16; \ - ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \ - ;; \ - mov ar.unat=r16; \ - ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \ - ;; \ - mov ar.fpsr=r16; \ - ld8 r16=[r2],CTX(LC)-CTX(PFS); \ - ;; \ - mov ar.pfs=r16; \ - ld8 r16=[r2]; \ - ;; \ - mov ar.lc=r16; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_CTL_REGS \ - add r2 = CTX(DCR),r32; \ - mov r16 = cr.dcr; \ - ;; \ - st8 [r2] = r16,CTX(IVA)-CTX(DCR); \ - ;; \ - mov r16 = cr.iva; \ - ;; \ - st8 [r2] = r16,CTX(PTA)-CTX(IVA); \ - ;; \ - mov r16 = cr.pta; \ - ;; \ - st8 [r2] = r16 ; \ - ;; - - /* - * r33: context_t base address - */ -#define RESTORE_CTL_REGS \ - add r2 = CTX(DCR),r33; \ - ;; \ - ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \ - ;; \ - mov cr.dcr = r16; \ - dv_serialize_data; \ - ;; \ - ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \ - ;; \ - mov cr.iva = r16; \ - dv_serialize_data; \ - ;; \ - ld8 r16 = [r2]; \ - ;; \ - mov cr.pta = r16; \ - dv_serialize_data; \ - ;; - - - /* - * r32: context_t base address - */ -#define SAVE_REGION_REGS \ - add r2=CTX(RR0),r32; \ - mov r16=rr[r0]; \ - dep.z r18=1,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=2,61,3; \ - ;; \ - st8 [r2]=r17,8; \ - mov r16=rr[r18]; \ - dep.z r18=3,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=4,61,3; \ - ;; \ - st8 [r2]=r17,8; \ - mov r16=rr[r18]; \ - dep.z r18=5,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=7,61,3; \ - ;; \ - st8 [r2]=r17,16; \ - mov r16=rr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - ;; - - /* - * r33:context_t base address - */ -#define RESTORE_REGION_REGS \ - add r2=CTX(RR0),r33;\ - mov r18=r0; \ - ;; \ - ld8 r20=[r2],8; \ - ;; /* rr0 */ \ - ld8 r21=[r2],8; \ - ;; /* rr1 */ \ - ld8 r22=[r2],8; \ - ;; /* rr2 */ \ - ld8 r23=[r2],8; \ - ;; /* rr3 */ \ - ld8 r24=[r2],8; \ - ;; /* rr4 */ \ - ld8 r25=[r2],16; \ - ;; /* rr5 */ \ - ld8 r27=[r2]; \ - ;; /* rr7 */ \ - mov rr[r18]=r20; \ - dep.z r18=1,61,3; \ - ;; /* rr1 */ \ - mov rr[r18]=r21; \ - dep.z r18=2,61,3; \ - ;; /* rr2 */ \ - mov rr[r18]=r22; \ - dep.z r18=3,61,3; \ - ;; /* rr3 */ \ - mov rr[r18]=r23; \ - dep.z r18=4,61,3; \ - ;; /* rr4 */ \ - mov rr[r18]=r24; \ - dep.z r18=5,61,3; \ - ;; /* rr5 */ \ - mov rr[r18]=r25; \ - dep.z r18=7,61,3; \ - ;; /* rr7 */ \ - mov rr[r18]=r27; \ - ;; \ - srlz.i; \ - ;; - - - - /* - * r32: context_t base address - * r36~r39:scratch registers - */ -#define SAVE_DEBUG_REGS \ - add r2=CTX(IBR0),r32; \ - add r3=CTX(DBR0),r32; \ - mov r16=ibr[r0]; \ - mov r17=dbr[r0]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=1,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=2,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=2,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=3,r0; \ - ;; \ - mov 
r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=4,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=5,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=6,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=7,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - ;; - - -/* - * r33: point to context_t structure - * ar.lc are corrupted. - */ -#define RESTORE_DEBUG_REGS \ - add r2=CTX(IBR0),r33; \ - add r3=CTX(DBR0),r33; \ - mov r16=7; \ - mov r17=r0; \ - ;; \ - mov ar.lc = r16; \ - ;; \ -1: \ - ld8 r18=[r2],8; \ - ld8 r19=[r3],8; \ - ;; \ - mov ibr[r17]=r18; \ - mov dbr[r17]=r19; \ - ;; \ - srlz.i; \ - ;; \ - add r17=1,r17; \ - br.cloop.sptk 1b; \ - ;; - - - /* - * r32: context_t base address - */ -#define SAVE_FPU_LOW \ - add r2=CTX(F2),r32; \ - add r3=CTX(F3),r32; \ - ;; \ - stf.spill.nta [r2]=f2,32; \ - stf.spill.nta [r3]=f3,32; \ - ;; \ - stf.spill.nta [r2]=f4,32; \ - stf.spill.nta [r3]=f5,32; \ - ;; \ - stf.spill.nta [r2]=f6,32; \ - stf.spill.nta [r3]=f7,32; \ - ;; \ - stf.spill.nta [r2]=f8,32; \ - stf.spill.nta [r3]=f9,32; \ - ;; \ - stf.spill.nta [r2]=f10,32; \ - stf.spill.nta [r3]=f11,32; \ - ;; \ - stf.spill.nta [r2]=f12,32; \ - stf.spill.nta [r3]=f13,32; \ - ;; \ - stf.spill.nta [r2]=f14,32; \ - stf.spill.nta [r3]=f15,32; \ - ;; \ - stf.spill.nta [r2]=f16,32; \ - stf.spill.nta [r3]=f17,32; \ - ;; \ - stf.spill.nta [r2]=f18,32; \ - stf.spill.nta [r3]=f19,32; \ - ;; \ - stf.spill.nta [r2]=f20,32; \ - stf.spill.nta [r3]=f21,32; \ - ;; \ - stf.spill.nta [r2]=f22,32; \ - stf.spill.nta [r3]=f23,32; \ - ;; \ - stf.spill.nta [r2]=f24,32; \ - stf.spill.nta [r3]=f25,32; \ - ;; \ - stf.spill.nta [r2]=f26,32; \ - stf.spill.nta [r3]=f27,32; \ - ;; \ - stf.spill.nta [r2]=f28,32; \ - stf.spill.nta [r3]=f29,32; \ - ;; \ - stf.spill.nta [r2]=f30; \ - stf.spill.nta [r3]=f31; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_FPU_HIGH \ - add r2=CTX(F32),r32; \ - add r3=CTX(F33),r32; \ - ;; \ - stf.spill.nta [r2]=f32,32; \ - stf.spill.nta [r3]=f33,32; \ - ;; \ - stf.spill.nta [r2]=f34,32; \ - stf.spill.nta [r3]=f35,32; \ - ;; \ - stf.spill.nta [r2]=f36,32; \ - stf.spill.nta [r3]=f37,32; \ - ;; \ - stf.spill.nta [r2]=f38,32; \ - stf.spill.nta [r3]=f39,32; \ - ;; \ - stf.spill.nta [r2]=f40,32; \ - stf.spill.nta [r3]=f41,32; \ - ;; \ - stf.spill.nta [r2]=f42,32; \ - stf.spill.nta [r3]=f43,32; \ - ;; \ - stf.spill.nta [r2]=f44,32; \ - stf.spill.nta [r3]=f45,32; \ - ;; \ - stf.spill.nta [r2]=f46,32; \ - stf.spill.nta [r3]=f47,32; \ - ;; \ - stf.spill.nta [r2]=f48,32; \ - stf.spill.nta [r3]=f49,32; \ - ;; \ - stf.spill.nta [r2]=f50,32; \ - stf.spill.nta [r3]=f51,32; \ - ;; \ - stf.spill.nta [r2]=f52,32; \ - stf.spill.nta [r3]=f53,32; \ - ;; \ - stf.spill.nta [r2]=f54,32; \ - stf.spill.nta [r3]=f55,32; \ - ;; \ - stf.spill.nta [r2]=f56,32; \ - stf.spill.nta [r3]=f57,32; \ - ;; \ - stf.spill.nta [r2]=f58,32; \ - stf.spill.nta [r3]=f59,32; \ - ;; \ - stf.spill.nta [r2]=f60,32; \ - stf.spill.nta [r3]=f61,32; \ - ;; \ - stf.spill.nta [r2]=f62,32; \ - stf.spill.nta [r3]=f63,32; \ - ;; \ - stf.spill.nta [r2]=f64,32; \ - stf.spill.nta [r3]=f65,32; \ - ;; \ - stf.spill.nta [r2]=f66,32; \ - stf.spill.nta [r3]=f67,32; \ - ;; \ - stf.spill.nta [r2]=f68,32; \ - stf.spill.nta [r3]=f69,32; \ - ;; \ - stf.spill.nta 
[r2]=f70,32; \ - stf.spill.nta [r3]=f71,32; \ - ;; \ - stf.spill.nta [r2]=f72,32; \ - stf.spill.nta [r3]=f73,32; \ - ;; \ - stf.spill.nta [r2]=f74,32; \ - stf.spill.nta [r3]=f75,32; \ - ;; \ - stf.spill.nta [r2]=f76,32; \ - stf.spill.nta [r3]=f77,32; \ - ;; \ - stf.spill.nta [r2]=f78,32; \ - stf.spill.nta [r3]=f79,32; \ - ;; \ - stf.spill.nta [r2]=f80,32; \ - stf.spill.nta [r3]=f81,32; \ - ;; \ - stf.spill.nta [r2]=f82,32; \ - stf.spill.nta [r3]=f83,32; \ - ;; \ - stf.spill.nta [r2]=f84,32; \ - stf.spill.nta [r3]=f85,32; \ - ;; \ - stf.spill.nta [r2]=f86,32; \ - stf.spill.nta [r3]=f87,32; \ - ;; \ - stf.spill.nta [r2]=f88,32; \ - stf.spill.nta [r3]=f89,32; \ - ;; \ - stf.spill.nta [r2]=f90,32; \ - stf.spill.nta [r3]=f91,32; \ - ;; \ - stf.spill.nta [r2]=f92,32; \ - stf.spill.nta [r3]=f93,32; \ - ;; \ - stf.spill.nta [r2]=f94,32; \ - stf.spill.nta [r3]=f95,32; \ - ;; \ - stf.spill.nta [r2]=f96,32; \ - stf.spill.nta [r3]=f97,32; \ - ;; \ - stf.spill.nta [r2]=f98,32; \ - stf.spill.nta [r3]=f99,32; \ - ;; \ - stf.spill.nta [r2]=f100,32; \ - stf.spill.nta [r3]=f101,32; \ - ;; \ - stf.spill.nta [r2]=f102,32; \ - stf.spill.nta [r3]=f103,32; \ - ;; \ - stf.spill.nta [r2]=f104,32; \ - stf.spill.nta [r3]=f105,32; \ - ;; \ - stf.spill.nta [r2]=f106,32; \ - stf.spill.nta [r3]=f107,32; \ - ;; \ - stf.spill.nta [r2]=f108,32; \ - stf.spill.nta [r3]=f109,32; \ - ;; \ - stf.spill.nta [r2]=f110,32; \ - stf.spill.nta [r3]=f111,32; \ - ;; \ - stf.spill.nta [r2]=f112,32; \ - stf.spill.nta [r3]=f113,32; \ - ;; \ - stf.spill.nta [r2]=f114,32; \ - stf.spill.nta [r3]=f115,32; \ - ;; \ - stf.spill.nta [r2]=f116,32; \ - stf.spill.nta [r3]=f117,32; \ - ;; \ - stf.spill.nta [r2]=f118,32; \ - stf.spill.nta [r3]=f119,32; \ - ;; \ - stf.spill.nta [r2]=f120,32; \ - stf.spill.nta [r3]=f121,32; \ - ;; \ - stf.spill.nta [r2]=f122,32; \ - stf.spill.nta [r3]=f123,32; \ - ;; \ - stf.spill.nta [r2]=f124,32; \ - stf.spill.nta [r3]=f125,32; \ - ;; \ - stf.spill.nta [r2]=f126; \ - stf.spill.nta [r3]=f127; \ - ;; - - /* - * r33: point to context_t structure - */ -#define RESTORE_FPU_LOW \ - add r2 = CTX(F2), r33; \ - add r3 = CTX(F3), r33; \ - ;; \ - ldf.fill.nta f2 = [r2], 32; \ - ldf.fill.nta f3 = [r3], 32; \ - ;; \ - ldf.fill.nta f4 = [r2], 32; \ - ldf.fill.nta f5 = [r3], 32; \ - ;; \ - ldf.fill.nta f6 = [r2], 32; \ - ldf.fill.nta f7 = [r3], 32; \ - ;; \ - ldf.fill.nta f8 = [r2], 32; \ - ldf.fill.nta f9 = [r3], 32; \ - ;; \ - ldf.fill.nta f10 = [r2], 32; \ - ldf.fill.nta f11 = [r3], 32; \ - ;; \ - ldf.fill.nta f12 = [r2], 32; \ - ldf.fill.nta f13 = [r3], 32; \ - ;; \ - ldf.fill.nta f14 = [r2], 32; \ - ldf.fill.nta f15 = [r3], 32; \ - ;; \ - ldf.fill.nta f16 = [r2], 32; \ - ldf.fill.nta f17 = [r3], 32; \ - ;; \ - ldf.fill.nta f18 = [r2], 32; \ - ldf.fill.nta f19 = [r3], 32; \ - ;; \ - ldf.fill.nta f20 = [r2], 32; \ - ldf.fill.nta f21 = [r3], 32; \ - ;; \ - ldf.fill.nta f22 = [r2], 32; \ - ldf.fill.nta f23 = [r3], 32; \ - ;; \ - ldf.fill.nta f24 = [r2], 32; \ - ldf.fill.nta f25 = [r3], 32; \ - ;; \ - ldf.fill.nta f26 = [r2], 32; \ - ldf.fill.nta f27 = [r3], 32; \ - ;; \ - ldf.fill.nta f28 = [r2], 32; \ - ldf.fill.nta f29 = [r3], 32; \ - ;; \ - ldf.fill.nta f30 = [r2], 32; \ - ldf.fill.nta f31 = [r3], 32; \ - ;; - - - - /* - * r33: point to context_t structure - */ -#define RESTORE_FPU_HIGH \ - add r2 = CTX(F32), r33; \ - add r3 = CTX(F33), r33; \ - ;; \ - ldf.fill.nta f32 = [r2], 32; \ - ldf.fill.nta f33 = [r3], 32; \ - ;; \ - ldf.fill.nta f34 = [r2], 32; \ - ldf.fill.nta f35 = [r3], 32; \ - ;; \ - ldf.fill.nta f36 = [r2], 32; \ - 
ldf.fill.nta f37 = [r3], 32; \ - ;; \ - ldf.fill.nta f38 = [r2], 32; \ - ldf.fill.nta f39 = [r3], 32; \ - ;; \ - ldf.fill.nta f40 = [r2], 32; \ - ldf.fill.nta f41 = [r3], 32; \ - ;; \ - ldf.fill.nta f42 = [r2], 32; \ - ldf.fill.nta f43 = [r3], 32; \ - ;; \ - ldf.fill.nta f44 = [r2], 32; \ - ldf.fill.nta f45 = [r3], 32; \ - ;; \ - ldf.fill.nta f46 = [r2], 32; \ - ldf.fill.nta f47 = [r3], 32; \ - ;; \ - ldf.fill.nta f48 = [r2], 32; \ - ldf.fill.nta f49 = [r3], 32; \ - ;; \ - ldf.fill.nta f50 = [r2], 32; \ - ldf.fill.nta f51 = [r3], 32; \ - ;; \ - ldf.fill.nta f52 = [r2], 32; \ - ldf.fill.nta f53 = [r3], 32; \ - ;; \ - ldf.fill.nta f54 = [r2], 32; \ - ldf.fill.nta f55 = [r3], 32; \ - ;; \ - ldf.fill.nta f56 = [r2], 32; \ - ldf.fill.nta f57 = [r3], 32; \ - ;; \ - ldf.fill.nta f58 = [r2], 32; \ - ldf.fill.nta f59 = [r3], 32; \ - ;; \ - ldf.fill.nta f60 = [r2], 32; \ - ldf.fill.nta f61 = [r3], 32; \ - ;; \ - ldf.fill.nta f62 = [r2], 32; \ - ldf.fill.nta f63 = [r3], 32; \ - ;; \ - ldf.fill.nta f64 = [r2], 32; \ - ldf.fill.nta f65 = [r3], 32; \ - ;; \ - ldf.fill.nta f66 = [r2], 32; \ - ldf.fill.nta f67 = [r3], 32; \ - ;; \ - ldf.fill.nta f68 = [r2], 32; \ - ldf.fill.nta f69 = [r3], 32; \ - ;; \ - ldf.fill.nta f70 = [r2], 32; \ - ldf.fill.nta f71 = [r3], 32; \ - ;; \ - ldf.fill.nta f72 = [r2], 32; \ - ldf.fill.nta f73 = [r3], 32; \ - ;; \ - ldf.fill.nta f74 = [r2], 32; \ - ldf.fill.nta f75 = [r3], 32; \ - ;; \ - ldf.fill.nta f76 = [r2], 32; \ - ldf.fill.nta f77 = [r3], 32; \ - ;; \ - ldf.fill.nta f78 = [r2], 32; \ - ldf.fill.nta f79 = [r3], 32; \ - ;; \ - ldf.fill.nta f80 = [r2], 32; \ - ldf.fill.nta f81 = [r3], 32; \ - ;; \ - ldf.fill.nta f82 = [r2], 32; \ - ldf.fill.nta f83 = [r3], 32; \ - ;; \ - ldf.fill.nta f84 = [r2], 32; \ - ldf.fill.nta f85 = [r3], 32; \ - ;; \ - ldf.fill.nta f86 = [r2], 32; \ - ldf.fill.nta f87 = [r3], 32; \ - ;; \ - ldf.fill.nta f88 = [r2], 32; \ - ldf.fill.nta f89 = [r3], 32; \ - ;; \ - ldf.fill.nta f90 = [r2], 32; \ - ldf.fill.nta f91 = [r3], 32; \ - ;; \ - ldf.fill.nta f92 = [r2], 32; \ - ldf.fill.nta f93 = [r3], 32; \ - ;; \ - ldf.fill.nta f94 = [r2], 32; \ - ldf.fill.nta f95 = [r3], 32; \ - ;; \ - ldf.fill.nta f96 = [r2], 32; \ - ldf.fill.nta f97 = [r3], 32; \ - ;; \ - ldf.fill.nta f98 = [r2], 32; \ - ldf.fill.nta f99 = [r3], 32; \ - ;; \ - ldf.fill.nta f100 = [r2], 32; \ - ldf.fill.nta f101 = [r3], 32; \ - ;; \ - ldf.fill.nta f102 = [r2], 32; \ - ldf.fill.nta f103 = [r3], 32; \ - ;; \ - ldf.fill.nta f104 = [r2], 32; \ - ldf.fill.nta f105 = [r3], 32; \ - ;; \ - ldf.fill.nta f106 = [r2], 32; \ - ldf.fill.nta f107 = [r3], 32; \ - ;; \ - ldf.fill.nta f108 = [r2], 32; \ - ldf.fill.nta f109 = [r3], 32; \ - ;; \ - ldf.fill.nta f110 = [r2], 32; \ - ldf.fill.nta f111 = [r3], 32; \ - ;; \ - ldf.fill.nta f112 = [r2], 32; \ - ldf.fill.nta f113 = [r3], 32; \ - ;; \ - ldf.fill.nta f114 = [r2], 32; \ - ldf.fill.nta f115 = [r3], 32; \ - ;; \ - ldf.fill.nta f116 = [r2], 32; \ - ldf.fill.nta f117 = [r3], 32; \ - ;; \ - ldf.fill.nta f118 = [r2], 32; \ - ldf.fill.nta f119 = [r3], 32; \ - ;; \ - ldf.fill.nta f120 = [r2], 32; \ - ldf.fill.nta f121 = [r3], 32; \ - ;; \ - ldf.fill.nta f122 = [r2], 32; \ - ldf.fill.nta f123 = [r3], 32; \ - ;; \ - ldf.fill.nta f124 = [r2], 32; \ - ldf.fill.nta f125 = [r3], 32; \ - ;; \ - ldf.fill.nta f126 = [r2], 32; \ - ldf.fill.nta f127 = [r3], 32; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_PTK_REGS \ - add r2=CTX(PKR0), r32; \ - mov r16=7; \ - ;; \ - mov ar.lc=r16; \ - mov r17=r0; \ - ;; \ -1: \ - mov r18=pkr[r17]; \ - ;; \ - srlz.i; 
\ - ;; \ - st8 [r2]=r18, 8; \ - ;; \ - add r17 =1,r17; \ - ;; \ - br.cloop.sptk 1b; \ - ;; - -/* - * r33: point to context_t structure - * ar.lc are corrupted. - */ -#define RESTORE_PTK_REGS \ - add r2=CTX(PKR0), r33; \ - mov r16=7; \ - ;; \ - mov ar.lc=r16; \ - mov r17=r0; \ - ;; \ -1: \ - ld8 r18=[r2], 8; \ - ;; \ - mov pkr[r17]=r18; \ - ;; \ - srlz.i; \ - ;; \ - add r17 =1,r17; \ - ;; \ - br.cloop.sptk 1b; \ - ;; - - -/* - * void vmm_trampoline( context_t * from, - * context_t * to) - * - * from: r32 - * to: r33 - * note: interrupt disabled before call this function. - */ -GLOBAL_ENTRY(vmm_trampoline) - mov r16 = psr - adds r2 = CTX(PSR), r32 - ;; - st8 [r2] = r16, 8 // psr - mov r17 = pr - ;; - st8 [r2] = r17, 8 // pr - mov r18 = ar.unat - ;; - st8 [r2] = r18 - mov r17 = ar.rsc - ;; - adds r2 = CTX(RSC),r32 - ;; - st8 [r2]= r17 - mov ar.rsc =0 - flushrs - ;; - SAVE_GENERAL_REGS - ;; - SAVE_KERNEL_REGS - ;; - SAVE_APP_REGS - ;; - SAVE_BRANCH_REGS - ;; - SAVE_CTL_REGS - ;; - SAVE_REGION_REGS - ;; - //SAVE_DEBUG_REGS - ;; - rsm psr.dfl - ;; - srlz.d - ;; - SAVE_FPU_LOW - ;; - rsm psr.dfh - ;; - srlz.d - ;; - SAVE_FPU_HIGH - ;; - SAVE_PTK_REGS - ;; - RESTORE_PTK_REGS - ;; - RESTORE_FPU_HIGH - ;; - RESTORE_FPU_LOW - ;; - //RESTORE_DEBUG_REGS - ;; - RESTORE_REGION_REGS - ;; - RESTORE_CTL_REGS - ;; - RESTORE_BRANCH_REGS - ;; - RESTORE_APP_REGS - ;; - RESTORE_KERNEL_REGS - ;; - RESTORE_GENERAL_REGS - ;; - adds r2=CTX(PSR), r33 - ;; - ld8 r16=[r2], 8 // psr - ;; - mov psr.l=r16 - ;; - srlz.d - ;; - ld8 r16=[r2], 8 // pr - ;; - mov pr =r16,-1 - ld8 r16=[r2] // unat - ;; - mov ar.unat=r16 - ;; - adds r2=CTX(RSC),r33 - ;; - ld8 r16 =[r2] - ;; - mov ar.rsc = r16 - ;; - br.ret.sptk.few b0 -END(vmm_trampoline) diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c deleted file mode 100644 index 958815c9787d..000000000000 --- a/arch/ia64/kvm/vcpu.c +++ /dev/null @@ -1,2209 +0,0 @@ -/* - * kvm_vcpu.c: handling all virtual cpu related thing. - * Copyright (c) 2005, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Shaofan Li (Susue Li) <susie.li@intel.com> - * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang <xiantao.zhang@intel.com> - */ - -#include <linux/kvm_host.h> -#include <linux/types.h> - -#include <asm/processor.h> -#include <asm/ia64regs.h> -#include <asm/gcc_intrin.h> -#include <asm/kregs.h> -#include <asm/pgtable.h> -#include <asm/tlb.h> - -#include "asm-offsets.h" -#include "vcpu.h" - -/* - * Special notes: - * - Index by it/dt/rt sequence - * - Only existing mode transitions are allowed in this table - * - RSE is placed at lazy mode when emulating guest partial mode - * - If gva happens to be rr0 and rr4, only allowed case is identity - * mapping (gva=gpa), or panic! (How?) 
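The mm_switch_table that follows is indexed with MODE_IND() applied to the old and new guest PSR, i.e. by the 3-bit (it, dt, rt) tuple. MODE_IND() itself is not part of this hunk, so the bit order in the sketch below is an assumption; the fragment only illustrates how such an 8x8 table is addressed and is not code from the removed file.

#include <stdio.h>

/* Assumed encoding: psr.it is the most significant of the three bits. */
static int mode_index(unsigned it, unsigned dt, unsigned rt)
{
        return (int)((it << 2) | (dt << 1) | rt);
}

int main(void)
{
        /* (0,0,0) -> (1,1,1): the fully-physical to fully-virtual
         * transition called out in the table's own comments. */
        printf("old=%d new=%d\n", mode_index(0, 0, 0), mode_index(1, 1, 1));
        return 0;
}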
- */ -int mm_switch_table[8][8] = { - /* 2004/09/12(Kevin): Allow switch to self */ - /* - * (it,dt,rt): (0,0,0) -> (1,1,1) - * This kind of transition usually occurs in the very early - * stage of Linux boot up procedure. Another case is in efi - * and pal calls. (see "arch/ia64/kernel/head.S") - * - * (it,dt,rt): (0,0,0) -> (0,1,1) - * This kind of transition is found when OSYa exits efi boot - * service. Due to gva = gpa in this case (Same region), - * data access can be satisfied though itlb entry for physical - * emulation is hit. - */ - {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V}, - {0, 0, 0, 0, 0, 0, 0, 0}, - {0, 0, 0, 0, 0, 0, 0, 0}, - /* - * (it,dt,rt): (0,1,1) -> (1,1,1) - * This kind of transition is found in OSYa. - * - * (it,dt,rt): (0,1,1) -> (0,0,0) - * This kind of transition is found in OSYa - */ - {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V}, - /* (1,0,0)->(1,1,1) */ - {0, 0, 0, 0, 0, 0, 0, SW_P2V}, - /* - * (it,dt,rt): (1,0,1) -> (1,1,1) - * This kind of transition usually occurs when Linux returns - * from the low level TLB miss handlers. - * (see "arch/ia64/kernel/ivt.S") - */ - {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V}, - {0, 0, 0, 0, 0, 0, 0, 0}, - /* - * (it,dt,rt): (1,1,1) -> (1,0,1) - * This kind of transition usually occurs in Linux low level - * TLB miss handler. (see "arch/ia64/kernel/ivt.S") - * - * (it,dt,rt): (1,1,1) -> (0,0,0) - * This kind of transition usually occurs in pal and efi calls, - * which requires running in physical mode. - * (see "arch/ia64/kernel/head.S") - * (1,1,1)->(1,0,0) - */ - - {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, -}; - -void physical_mode_init(struct kvm_vcpu *vcpu) -{ - vcpu->arch.mode_flags = GUEST_IN_PHY; -} - -void switch_to_physical_rid(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - - /* Save original virtual mode rr[0] and rr[4] */ - psr = ia64_clear_ic(); - ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0); - ia64_srlz_d(); - ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4); - ia64_srlz_d(); - - ia64_set_psr(psr); - return; -} - -void switch_to_virtual_rid(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - - psr = ia64_clear_ic(); - ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); - ia64_srlz_d(); - ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); - ia64_srlz_d(); - ia64_set_psr(psr); - return; -} - -static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) -{ - return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; -} - -void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, - struct ia64_psr new_psr) -{ - int act; - act = mm_switch_action(old_psr, new_psr); - switch (act) { - case SW_V2P: - /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", - old_psr.val, new_psr.val);*/ - switch_to_physical_rid(vcpu); - /* - * Set rse to enforced lazy, to prevent active rse - *save/restor when guest physical mode. - */ - vcpu->arch.mode_flags |= GUEST_IN_PHY; - break; - case SW_P2V: - switch_to_virtual_rid(vcpu); - /* - * recover old mode which is saved when entering - * guest physical mode - */ - vcpu->arch.mode_flags &= ~GUEST_IN_PHY; - break; - case SW_SELF: - break; - case SW_NOP: - break; - default: - /* Sanity check */ - break; - } - return; -} - -/* - * In physical mode, insert tc/tr for region 0 and 4 uses - * RID[0] and RID[4] which is for physical mode emulation. - * However what those inserted tc/tr wants is rid for - * virtual mode. So original virtual rid needs to be restored - * before insert. 
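switch_to_physical_rid() and switch_to_virtual_rid() above reprogram only the region registers for regions 0 and 4, the regions used for physical-mode emulation, and the comment above explains why the original virtual RID must be restored before insertions made on the guest's behalf (the list of affected operations continues below). As a small, self-contained illustration of the addressing involved (not from the removed file, and assuming the conventional ia64 split in which address bits 63:61 select the region):

#include <stdint.h>

#define VRN_SHIFT 61

/* Virtual region number: the top three bits of a 64-bit address. */
static unsigned vrn_of(uint64_t vadr)
{
        return (unsigned)(vadr >> VRN_SHIFT);
}

/* Value used to name a region register: the region number placed back
 * into bits 63:61 with everything below cleared. */
static uint64_t rr_va(unsigned vrn)
{
        return (uint64_t)vrn << VRN_SHIFT;
}

While the guest runs in emulated physical mode only regions 0 and 4 carry the metaphysical RIDs; the other regions keep their virtualized RIDs, which is why each switch touches only rr[VRN0] and rr[VRN4].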
- * - * Operations which required such switch include: - * - insertions (itc.*, itr.*) - * - purges (ptc.* and ptr.*) - * - tpa - * - tak - * - thash?, ttag? - * All above needs actual virtual rid for destination entry. - */ - -void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, - struct ia64_psr new_psr) -{ - - if ((old_psr.dt != new_psr.dt) - || (old_psr.it != new_psr.it) - || (old_psr.rt != new_psr.rt)) - switch_mm_mode(vcpu, old_psr, new_psr); - - return; -} - - -/* - * In physical mode, insert tc/tr for region 0 and 4 uses - * RID[0] and RID[4] which is for physical mode emulation. - * However what those inserted tc/tr wants is rid for - * virtual mode. So original virtual rid needs to be restored - * before insert. - * - * Operations which required such switch include: - * - insertions (itc.*, itr.*) - * - purges (ptc.* and ptr.*) - * - tpa - * - tak - * - thash?, ttag? - * All above needs actual virtual rid for destination entry. - */ - -void prepare_if_physical_mode(struct kvm_vcpu *vcpu) -{ - if (is_physical_mode(vcpu)) { - vcpu->arch.mode_flags |= GUEST_PHY_EMUL; - switch_to_virtual_rid(vcpu); - } - return; -} - -/* Recover always follows prepare */ -void recover_if_physical_mode(struct kvm_vcpu *vcpu) -{ - if (is_physical_mode(vcpu)) - switch_to_physical_rid(vcpu); - vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; - return; -} - -#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) - -static u16 gr_info[32] = { - 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ - RPT(r1), RPT(r2), RPT(r3), - RPT(r4), RPT(r5), RPT(r6), RPT(r7), - RPT(r8), RPT(r9), RPT(r10), RPT(r11), - RPT(r12), RPT(r13), RPT(r14), RPT(r15), - RPT(r16), RPT(r17), RPT(r18), RPT(r19), - RPT(r20), RPT(r21), RPT(r22), RPT(r23), - RPT(r24), RPT(r25), RPT(r26), RPT(r27), - RPT(r28), RPT(r29), RPT(r30), RPT(r31) -}; - -#define IA64_FIRST_STACKED_GR 32 -#define IA64_FIRST_ROTATING_FR 32 - -static inline unsigned long -rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg) -{ - reg += rrb; - if (reg >= sor) - reg -= sor; - return reg; -} - -/* - * Return the (rotated) index for floating point register - * be in the REGNUM (REGNUM must range from 32-127, - * result is in the range from 0-95. - */ -static inline unsigned long fph_index(struct kvm_pt_regs *regs, - long regnum) -{ - unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; - return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); -} - -/* - * The inverse of the above: given bspstore and the number of - * registers, calculate ar.bsp. 
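kvm_rse_skip_regs(), which follows, steps across the register backing store while skipping the NaT-collection words the RSE interleaves. As background, here is a standalone sketch (not code from the removed file) of the slot arithmetic it relies on: backing-store addresses are 8-byte slots, bits 10:3 give the slot number within a 512-byte span, and slot 63 of each span holds the RNaT collection rather than a stacked register.

#include <stdbool.h>
#include <stdint.h>

/* Slot number of a backing-store address within its 512-byte span
 * (the computation ia64_rse_slot_num() performs). */
static unsigned rse_slot_num(uint64_t bsp_addr)
{
        return (unsigned)((bsp_addr >> 3) & 0x3f);
}

/* Slot 63 is the RNaT-collection word, not a stacked register. */
static bool rse_is_rnat_slot(uint64_t bsp_addr)
{
        return rse_slot_num(bsp_addr) == 0x3f;
}

/* Address of the RNaT collection covering a given register slot
 * (what ia64_rse_rnat_addr() returns). */
static uint64_t rse_rnat_addr(uint64_t bsp_addr)
{
        return bsp_addr | 0x1f8;
}

Skipping N stacked registers therefore advances by N slots plus one extra slot for every collection word crossed, which is what the 0x3f loop in kvm_rse_skip_regs() accounts for.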
- */ -static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr, - long num_regs) -{ - long delta = ia64_rse_slot_num(addr) + num_regs; - int i = 0; - - if (num_regs < 0) - delta -= 0x3e; - if (delta < 0) { - while (delta <= -0x3f) { - i--; - delta += 0x3f; - } - } else { - while (delta >= 0x3f) { - i++; - delta -= 0x3f; - } - } - - return addr + num_regs + i; -} - -static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, - unsigned long *val, int *nat) -{ - unsigned long *bsp, *addr, *rnat_addr, *bspstore; - unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; - unsigned long nat_mask; - unsigned long old_rsc, new_rsc; - long sof = (regs->cr_ifs) & 0x7f; - long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); - long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; - long ridx = r1 - 32; - - if (ridx < sor) - ridx = rotate_reg(sor, rrb_gr, ridx); - - old_rsc = ia64_getreg(_IA64_REG_AR_RSC); - new_rsc = old_rsc&(~(0x3)); - ia64_setreg(_IA64_REG_AR_RSC, new_rsc); - - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - bsp = kbs + (regs->loadrs >> 19); - - addr = kvm_rse_skip_regs(bsp, -sof + ridx); - nat_mask = 1UL << ia64_rse_slot_num(addr); - rnat_addr = ia64_rse_rnat_addr(addr); - - if (addr >= bspstore) { - ia64_flushrs(); - ia64_mf(); - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - } - *val = *addr; - if (nat) { - if (bspstore < rnat_addr) - *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) - & nat_mask); - else - *nat = (int)!!((*rnat_addr) & nat_mask); - ia64_setreg(_IA64_REG_AR_RSC, old_rsc); - } -} - -void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, - unsigned long val, unsigned long nat) -{ - unsigned long *bsp, *bspstore, *addr, *rnat_addr; - unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; - unsigned long nat_mask; - unsigned long old_rsc, new_rsc, psr; - unsigned long rnat; - long sof = (regs->cr_ifs) & 0x7f; - long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); - long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; - long ridx = r1 - 32; - - if (ridx < sor) - ridx = rotate_reg(sor, rrb_gr, ridx); - - old_rsc = ia64_getreg(_IA64_REG_AR_RSC); - /* put RSC to lazy mode, and set loadrs 0 */ - new_rsc = old_rsc & (~0x3fff0003); - ia64_setreg(_IA64_REG_AR_RSC, new_rsc); - bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */ - - addr = kvm_rse_skip_regs(bsp, -sof + ridx); - nat_mask = 1UL << ia64_rse_slot_num(addr); - rnat_addr = ia64_rse_rnat_addr(addr); - - local_irq_save(psr); - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - if (addr >= bspstore) { - - ia64_flushrs(); - ia64_mf(); - *addr = val; - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - rnat = ia64_getreg(_IA64_REG_AR_RNAT); - if (bspstore < rnat_addr) - rnat = rnat & (~nat_mask); - else - *rnat_addr = (*rnat_addr)&(~nat_mask); - - ia64_mf(); - ia64_loadrs(); - ia64_setreg(_IA64_REG_AR_RNAT, rnat); - } else { - rnat = ia64_getreg(_IA64_REG_AR_RNAT); - *addr = val; - if (bspstore < rnat_addr) - rnat = rnat&(~nat_mask); - else - *rnat_addr = (*rnat_addr) & (~nat_mask); - - ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); - ia64_setreg(_IA64_REG_AR_RNAT, rnat); - } - local_irq_restore(psr); - ia64_setreg(_IA64_REG_AR_RSC, old_rsc); -} - -void getreg(unsigned long regnum, unsigned long *val, - int *nat, struct kvm_pt_regs *regs) -{ - unsigned long addr, *unat; - if (regnum >= IA64_FIRST_STACKED_GR) { - get_rse_reg(regs, regnum, val, nat); - return; - } - - /* - * Now look at registers in [0-31] range and init correct UNAT - */ - addr = 
(unsigned long)regs; - unat = ®s->eml_unat; - - addr += gr_info[regnum]; - - *val = *(unsigned long *)addr; - /* - * do it only when requested - */ - if (nat) - *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; -} - -void setreg(unsigned long regnum, unsigned long val, - int nat, struct kvm_pt_regs *regs) -{ - unsigned long addr; - unsigned long bitmask; - unsigned long *unat; - - /* - * First takes care of stacked registers - */ - if (regnum >= IA64_FIRST_STACKED_GR) { - set_rse_reg(regs, regnum, val, nat); - return; - } - - /* - * Now look at registers in [0-31] range and init correct UNAT - */ - addr = (unsigned long)regs; - unat = ®s->eml_unat; - /* - * add offset from base of struct - * and do it ! - */ - addr += gr_info[regnum]; - - *(unsigned long *)addr = val; - - /* - * We need to clear the corresponding UNAT bit to fully emulate the load - * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4 - */ - bitmask = 1UL << ((addr >> 3) & 0x3f); - if (nat) - *unat |= bitmask; - else - *unat &= ~bitmask; - -} - -u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - unsigned long val; - - if (!reg) - return 0; - getreg(reg, &val, 0, regs); - return val; -} - -void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - long sof = (regs->cr_ifs) & 0x7f; - - if (!reg) - return; - if (reg >= sof + 32) - return; - setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ -} - -void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, - struct kvm_pt_regs *regs) -{ - /* Take floating register rotation into consideration*/ - if (regnum >= IA64_FIRST_ROTATING_FR) - regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); -#define CASE_FIXED_FP(reg) \ - case (reg) : \ - ia64_stf_spill(fpval, reg); \ - break - - switch (regnum) { - CASE_FIXED_FP(0); - CASE_FIXED_FP(1); - CASE_FIXED_FP(2); - CASE_FIXED_FP(3); - CASE_FIXED_FP(4); - CASE_FIXED_FP(5); - - CASE_FIXED_FP(6); - CASE_FIXED_FP(7); - CASE_FIXED_FP(8); - CASE_FIXED_FP(9); - CASE_FIXED_FP(10); - CASE_FIXED_FP(11); - - CASE_FIXED_FP(12); - CASE_FIXED_FP(13); - CASE_FIXED_FP(14); - CASE_FIXED_FP(15); - CASE_FIXED_FP(16); - CASE_FIXED_FP(17); - CASE_FIXED_FP(18); - CASE_FIXED_FP(19); - CASE_FIXED_FP(20); - CASE_FIXED_FP(21); - CASE_FIXED_FP(22); - CASE_FIXED_FP(23); - CASE_FIXED_FP(24); - CASE_FIXED_FP(25); - CASE_FIXED_FP(26); - CASE_FIXED_FP(27); - CASE_FIXED_FP(28); - CASE_FIXED_FP(29); - CASE_FIXED_FP(30); - CASE_FIXED_FP(31); - CASE_FIXED_FP(32); - CASE_FIXED_FP(33); - CASE_FIXED_FP(34); - CASE_FIXED_FP(35); - CASE_FIXED_FP(36); - CASE_FIXED_FP(37); - CASE_FIXED_FP(38); - CASE_FIXED_FP(39); - CASE_FIXED_FP(40); - CASE_FIXED_FP(41); - CASE_FIXED_FP(42); - CASE_FIXED_FP(43); - CASE_FIXED_FP(44); - CASE_FIXED_FP(45); - CASE_FIXED_FP(46); - CASE_FIXED_FP(47); - CASE_FIXED_FP(48); - CASE_FIXED_FP(49); - CASE_FIXED_FP(50); - CASE_FIXED_FP(51); - CASE_FIXED_FP(52); - CASE_FIXED_FP(53); - CASE_FIXED_FP(54); - CASE_FIXED_FP(55); - CASE_FIXED_FP(56); - CASE_FIXED_FP(57); - CASE_FIXED_FP(58); - CASE_FIXED_FP(59); - CASE_FIXED_FP(60); - CASE_FIXED_FP(61); - CASE_FIXED_FP(62); - CASE_FIXED_FP(63); - CASE_FIXED_FP(64); - CASE_FIXED_FP(65); - CASE_FIXED_FP(66); - CASE_FIXED_FP(67); - CASE_FIXED_FP(68); - CASE_FIXED_FP(69); - CASE_FIXED_FP(70); - CASE_FIXED_FP(71); - CASE_FIXED_FP(72); - CASE_FIXED_FP(73); - CASE_FIXED_FP(74); - CASE_FIXED_FP(75); - CASE_FIXED_FP(76); - CASE_FIXED_FP(77); - CASE_FIXED_FP(78); - CASE_FIXED_FP(79); - 
CASE_FIXED_FP(80); - CASE_FIXED_FP(81); - CASE_FIXED_FP(82); - CASE_FIXED_FP(83); - CASE_FIXED_FP(84); - CASE_FIXED_FP(85); - CASE_FIXED_FP(86); - CASE_FIXED_FP(87); - CASE_FIXED_FP(88); - CASE_FIXED_FP(89); - CASE_FIXED_FP(90); - CASE_FIXED_FP(91); - CASE_FIXED_FP(92); - CASE_FIXED_FP(93); - CASE_FIXED_FP(94); - CASE_FIXED_FP(95); - CASE_FIXED_FP(96); - CASE_FIXED_FP(97); - CASE_FIXED_FP(98); - CASE_FIXED_FP(99); - CASE_FIXED_FP(100); - CASE_FIXED_FP(101); - CASE_FIXED_FP(102); - CASE_FIXED_FP(103); - CASE_FIXED_FP(104); - CASE_FIXED_FP(105); - CASE_FIXED_FP(106); - CASE_FIXED_FP(107); - CASE_FIXED_FP(108); - CASE_FIXED_FP(109); - CASE_FIXED_FP(110); - CASE_FIXED_FP(111); - CASE_FIXED_FP(112); - CASE_FIXED_FP(113); - CASE_FIXED_FP(114); - CASE_FIXED_FP(115); - CASE_FIXED_FP(116); - CASE_FIXED_FP(117); - CASE_FIXED_FP(118); - CASE_FIXED_FP(119); - CASE_FIXED_FP(120); - CASE_FIXED_FP(121); - CASE_FIXED_FP(122); - CASE_FIXED_FP(123); - CASE_FIXED_FP(124); - CASE_FIXED_FP(125); - CASE_FIXED_FP(126); - CASE_FIXED_FP(127); - } -#undef CASE_FIXED_FP -} - -void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, - struct kvm_pt_regs *regs) -{ - /* Take floating register rotation into consideration*/ - if (regnum >= IA64_FIRST_ROTATING_FR) - regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); - -#define CASE_FIXED_FP(reg) \ - case (reg) : \ - ia64_ldf_fill(reg, fpval); \ - break - - switch (regnum) { - CASE_FIXED_FP(2); - CASE_FIXED_FP(3); - CASE_FIXED_FP(4); - CASE_FIXED_FP(5); - - CASE_FIXED_FP(6); - CASE_FIXED_FP(7); - CASE_FIXED_FP(8); - CASE_FIXED_FP(9); - CASE_FIXED_FP(10); - CASE_FIXED_FP(11); - - CASE_FIXED_FP(12); - CASE_FIXED_FP(13); - CASE_FIXED_FP(14); - CASE_FIXED_FP(15); - CASE_FIXED_FP(16); - CASE_FIXED_FP(17); - CASE_FIXED_FP(18); - CASE_FIXED_FP(19); - CASE_FIXED_FP(20); - CASE_FIXED_FP(21); - CASE_FIXED_FP(22); - CASE_FIXED_FP(23); - CASE_FIXED_FP(24); - CASE_FIXED_FP(25); - CASE_FIXED_FP(26); - CASE_FIXED_FP(27); - CASE_FIXED_FP(28); - CASE_FIXED_FP(29); - CASE_FIXED_FP(30); - CASE_FIXED_FP(31); - CASE_FIXED_FP(32); - CASE_FIXED_FP(33); - CASE_FIXED_FP(34); - CASE_FIXED_FP(35); - CASE_FIXED_FP(36); - CASE_FIXED_FP(37); - CASE_FIXED_FP(38); - CASE_FIXED_FP(39); - CASE_FIXED_FP(40); - CASE_FIXED_FP(41); - CASE_FIXED_FP(42); - CASE_FIXED_FP(43); - CASE_FIXED_FP(44); - CASE_FIXED_FP(45); - CASE_FIXED_FP(46); - CASE_FIXED_FP(47); - CASE_FIXED_FP(48); - CASE_FIXED_FP(49); - CASE_FIXED_FP(50); - CASE_FIXED_FP(51); - CASE_FIXED_FP(52); - CASE_FIXED_FP(53); - CASE_FIXED_FP(54); - CASE_FIXED_FP(55); - CASE_FIXED_FP(56); - CASE_FIXED_FP(57); - CASE_FIXED_FP(58); - CASE_FIXED_FP(59); - CASE_FIXED_FP(60); - CASE_FIXED_FP(61); - CASE_FIXED_FP(62); - CASE_FIXED_FP(63); - CASE_FIXED_FP(64); - CASE_FIXED_FP(65); - CASE_FIXED_FP(66); - CASE_FIXED_FP(67); - CASE_FIXED_FP(68); - CASE_FIXED_FP(69); - CASE_FIXED_FP(70); - CASE_FIXED_FP(71); - CASE_FIXED_FP(72); - CASE_FIXED_FP(73); - CASE_FIXED_FP(74); - CASE_FIXED_FP(75); - CASE_FIXED_FP(76); - CASE_FIXED_FP(77); - CASE_FIXED_FP(78); - CASE_FIXED_FP(79); - CASE_FIXED_FP(80); - CASE_FIXED_FP(81); - CASE_FIXED_FP(82); - CASE_FIXED_FP(83); - CASE_FIXED_FP(84); - CASE_FIXED_FP(85); - CASE_FIXED_FP(86); - CASE_FIXED_FP(87); - CASE_FIXED_FP(88); - CASE_FIXED_FP(89); - CASE_FIXED_FP(90); - CASE_FIXED_FP(91); - CASE_FIXED_FP(92); - CASE_FIXED_FP(93); - CASE_FIXED_FP(94); - CASE_FIXED_FP(95); - CASE_FIXED_FP(96); - CASE_FIXED_FP(97); - CASE_FIXED_FP(98); - CASE_FIXED_FP(99); - CASE_FIXED_FP(100); - CASE_FIXED_FP(101); - CASE_FIXED_FP(102); - 
CASE_FIXED_FP(103); - CASE_FIXED_FP(104); - CASE_FIXED_FP(105); - CASE_FIXED_FP(106); - CASE_FIXED_FP(107); - CASE_FIXED_FP(108); - CASE_FIXED_FP(109); - CASE_FIXED_FP(110); - CASE_FIXED_FP(111); - CASE_FIXED_FP(112); - CASE_FIXED_FP(113); - CASE_FIXED_FP(114); - CASE_FIXED_FP(115); - CASE_FIXED_FP(116); - CASE_FIXED_FP(117); - CASE_FIXED_FP(118); - CASE_FIXED_FP(119); - CASE_FIXED_FP(120); - CASE_FIXED_FP(121); - CASE_FIXED_FP(122); - CASE_FIXED_FP(123); - CASE_FIXED_FP(124); - CASE_FIXED_FP(125); - CASE_FIXED_FP(126); - CASE_FIXED_FP(127); - } -} - -void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, - struct ia64_fpreg *val) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - getfpreg(reg, val, regs); /* FIXME: handle NATs later*/ -} - -void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, - struct ia64_fpreg *val) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - if (reg > 1) - setfpreg(reg, val, regs); /* FIXME: handle NATs later*/ -} - -/* - * The Altix RTC is mapped specially here for the vmm module - */ -#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT)) -static long kvm_get_itc(struct kvm_vcpu *vcpu) -{ -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - struct kvm *kvm = (struct kvm *)KVM_VM_BASE; - - if (kvm->arch.is_sn2) - return (*SN_RTC_BASE); - else -#endif - return ia64_getreg(_IA64_REG_AR_ITC); -} - -/************************************************************************ - * lsapic timer - ***********************************************************************/ -u64 vcpu_get_itc(struct kvm_vcpu *vcpu) -{ - unsigned long guest_itc; - guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu); - - if (guest_itc >= VMX(vcpu, last_itc)) { - VMX(vcpu, last_itc) = guest_itc; - return guest_itc; - } else - return VMX(vcpu, last_itc); -} - -static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); -static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) -{ - struct kvm_vcpu *v; - struct kvm *kvm; - int i; - long itc_offset = val - kvm_get_itc(vcpu); - unsigned long vitv = VCPU(vcpu, itv); - - kvm = (struct kvm *)KVM_VM_BASE; - - if (kvm_vcpu_is_bsp(vcpu)) { - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { - v = (struct kvm_vcpu *)((char *)vcpu + - sizeof(struct kvm_vcpu_data) * i); - VMX(v, itc_offset) = itc_offset; - VMX(v, last_itc) = 0; - } - } - VMX(vcpu, last_itc) = 0; - if (VCPU(vcpu, itm) <= val) { - VMX(vcpu, itc_check) = 0; - vcpu_unpend_interrupt(vcpu, vitv); - } else { - VMX(vcpu, itc_check) = 1; - vcpu_set_itm(vcpu, VCPU(vcpu, itm)); - } - -} - -static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, itm)); -} - -static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) -{ - unsigned long vitv = VCPU(vcpu, itv); - VCPU(vcpu, itm) = val; - - if (val > vcpu_get_itc(vcpu)) { - VMX(vcpu, itc_check) = 1; - vcpu_unpend_interrupt(vcpu, vitv); - VMX(vcpu, timer_pending) = 0; - } else - VMX(vcpu, itc_check) = 0; -} - -#define ITV_VECTOR(itv) (itv&0xff) -#define ITV_IRQ_MASK(itv) (itv&(1<<16)) - -static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, itv) = val; - if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { - vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); - vcpu->arch.timer_pending = 0; - } -} - -static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) -{ - int vec; - - vec = highest_inservice_irq(vcpu); - if (vec == NULL_VECTOR) - return; - VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); - VCPU(vcpu, eoi) = 0; - vcpu->arch.irq_new_pending = 
1;
-
-}
-
-/* See Table 5-8 in SDM vol2 for the definition */
-int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
-{
-	union ia64_tpr vtpr;
-
-	vtpr.val = VCPU(vcpu, tpr);
-
-	if (h_inservice == NMI_VECTOR)
-		return IRQ_MASKED_BY_INSVC;
-
-	if (h_pending == NMI_VECTOR) {
-		/* Non Maskable Interrupt */
-		return IRQ_NO_MASKED;
-	}
-
-	if (h_inservice == ExtINT_VECTOR)
-		return IRQ_MASKED_BY_INSVC;
-
-	if (h_pending == ExtINT_VECTOR) {
-		if (vtpr.mmi) {
-			/* mask all external IRQ */
-			return IRQ_MASKED_BY_VTPR;
-		} else
-			return IRQ_NO_MASKED;
-	}
-
-	if (is_higher_irq(h_pending, h_inservice)) {
-		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
-			return IRQ_NO_MASKED;
-		else
-			return IRQ_MASKED_BY_VTPR;
-	} else {
-		return IRQ_MASKED_BY_INSVC;
-	}
-}
-
-void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
-	long spsr;
-	int ret;
-
-	local_irq_save(spsr);
-	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
-	local_irq_restore(spsr);
-
-	vcpu->arch.irq_new_pending = 1;
-}
-
-void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
-	long spsr;
-	int ret;
-
-	local_irq_save(spsr);
-	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
-	local_irq_restore(spsr);
-	if (ret) {
-		vcpu->arch.irq_new_pending = 1;
-		wmb();
-	}
-}
-
-void update_vhpi(struct kvm_vcpu *vcpu, int vec)
-{
-	u64 vhpi;
-
-	if (vec == NULL_VECTOR)
-		vhpi = 0;
-	else if (vec == NMI_VECTOR)
-		vhpi = 32;
-	else if (vec == ExtINT_VECTOR)
-		vhpi = 16;
-	else
-		vhpi = vec >> 4;
-
-	VCPU(vcpu, vhpi) = vhpi;
-	if (VCPU(vcpu, vac).a_int)
-		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
-				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
-}
-
-u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
-{
-	int vec, h_inservice, mask;
-
-	vec = highest_pending_irq(vcpu);
-	h_inservice = highest_inservice_irq(vcpu);
-	mask = irq_masked(vcpu, vec, h_inservice);
-	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
-		if (VCPU(vcpu, vhpi))
-			update_vhpi(vcpu, NULL_VECTOR);
-		return IA64_SPURIOUS_INT_VECTOR;
-	}
-	if (mask == IRQ_MASKED_BY_VTPR) {
-		update_vhpi(vcpu, vec);
-		return IA64_SPURIOUS_INT_VECTOR;
-	}
-	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
-	vcpu_unpend_interrupt(vcpu, vec);
-	return (u64)vec;
-}
-
-/**************************************************************************
- Privileged operation emulation routines
- **************************************************************************/
-u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	union ia64_pta vpta;
-	union ia64_rr vrr;
-	u64 pval;
-	u64 vhpt_offset;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	vrr.val = vcpu_get_rr(vcpu, vadr);
-	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
-	if (vpta.vf) {
-		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
-				vpta.val, 0, 0, 0, 0);
-	} else {
-		pval = (vadr & VRN_MASK) | vhpt_offset |
-			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
-	}
-	return pval;
-}
-
-u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	union ia64_rr vrr;
-	union ia64_pta vpta;
-	u64 pval;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	vrr.val = vcpu_get_rr(vcpu, vadr);
-	if (vpta.vf) {
-		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
-				0, 0, 0, 0, 0);
-	} else
-		pval = 1;
-
-	return pval;
-}
-
-u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	struct thash_data *data;
-	union ia64_pta vpta;
-	u64 key;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	if (vpta.vf == 0) {
-		key = 1;
-		return key;
-	}
-	data = vtlb_lookup(vcpu, vadr, D_TLB);
-	if (!data || !data->p)
-		key = 1;
-	else
-		key = data->key;
-
-	return key;
-}
-
-void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long thash, vadr;
-
-	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
-	thash = vcpu_thash(vcpu, vadr);
-	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
-}
-
-void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long tag, vadr;
-
-	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
-	tag = vcpu_ttag(vcpu, vadr);
-	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
-}
-
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
-{
-	struct thash_data *data;
-	union ia64_isr visr, pt_isr;
-	struct kvm_pt_regs *regs;
-	struct ia64_psr vpsr;
-
-	regs = vcpu_regs(vcpu);
-	pt_isr.val = VMX(vcpu, cr_isr);
-	visr.val = 0;
-	visr.ei = pt_isr.ei;
-	visr.ir = pt_isr.ir;
-	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-	visr.na = 1;
-
-	data = vhpt_lookup(vadr);
-	if (data) {
-		if (data->p == 0) {
-			vcpu_set_isr(vcpu, visr.val);
-			data_page_not_present(vcpu, vadr);
-			return IA64_FAULT;
-		} else if (data->ma == VA_MATTR_NATPAGE) {
-			vcpu_set_isr(vcpu, visr.val);
-			dnat_page_consumption(vcpu, vadr);
-			return IA64_FAULT;
-		} else {
-			*padr = (data->gpaddr >> data->ps << data->ps) |
-				(vadr & (PSIZE(data->ps) - 1));
-			return IA64_NO_FAULT;
-		}
-	}
-
-	data = vtlb_lookup(vcpu, vadr, D_TLB);
-	if (data) {
-		if (data->p == 0) {
-			vcpu_set_isr(vcpu, visr.val);
-			data_page_not_present(vcpu, vadr);
-			return IA64_FAULT;
-		} else if (data->ma == VA_MATTR_NATPAGE) {
-			vcpu_set_isr(vcpu, visr.val);
-			dnat_page_consumption(vcpu, vadr);
-			return IA64_FAULT;
-		} else{
-			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
-				| (vadr & (PSIZE(data->ps) - 1));
-			return IA64_NO_FAULT;
-		}
-	}
-	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
-		if (vpsr.ic) {
-			vcpu_set_isr(vcpu, visr.val);
-			alt_dtlb(vcpu, vadr);
-			return IA64_FAULT;
-		} else {
-			nested_dtlb(vcpu);
-			return IA64_FAULT;
-		}
-	} else {
-		if (vpsr.ic) {
-			vcpu_set_isr(vcpu, visr.val);
-			dvhpt_fault(vcpu, vadr);
-			return IA64_FAULT;
-		} else{
-			nested_dtlb(vcpu);
-			return IA64_FAULT;
-		}
-	}
-
-	return IA64_NO_FAULT;
-}
-
-int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1, r3;
-
-	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-
-	if (vcpu_tpa(vcpu, r3, &r1))
-		return IA64_FAULT;
-
-	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-	return(IA64_NO_FAULT);
-}
-
-void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1, r3;
-
-	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-	r1 = vcpu_tak(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-}
-
-/************************************
- * Insert/Purge translation register/cache
- ************************************/
-void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
-}
-
-void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
-}
-
-void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-	u64 ps, va, rid;
-	struct thash_data *p_itr;
-
-	ps = itir_ps(itir);
-	va = PAGEALIGN(ifa, ps);
-	pte &= ~PAGE_FLAGS_RV_MASK;
-	rid = vcpu_get_rr(vcpu, ifa);
-	rid = rid & RR_RID_MASK;
-	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
-	vcpu_set_tr(p_itr, pte, itir, va, rid);
-	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
-}
-
-
-void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-	u64 gpfn;
-	u64 ps, va, rid;
-	struct thash_data *p_dtr;
-
-	ps = itir_ps(itir);
-	va = PAGEALIGN(ifa, ps);
-	pte &= ~PAGE_FLAGS_RV_MASK;
-
-	if (ps != _PAGE_SIZE_16M)
-		thash_purge_entries(vcpu, va, ps);
-	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
-	if (__gpfn_is_io(gpfn))
-		pte |= VTLB_PTE_IO;
-	rid = vcpu_get_rr(vcpu, va);
-	rid = rid & RR_RID_MASK;
-	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
-	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
-			pte, itir, va, rid);
-	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
-}
-
-void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
-	int index;
-	u64 va;
-
-	va = PAGEALIGN(ifa, ps);
-	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
-		vcpu->arch.dtrs[index].page_flags = 0;
-
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
-	int index;
-	u64 va;
-
-	va = PAGEALIGN(ifa, ps);
-	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
-		vcpu->arch.itrs[index].page_flags = 0;
-
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	va = PAGEALIGN(va, ps);
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
-{
-	thash_purge_all(vcpu);
-}
-
-void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	long psr;
-	local_irq_save(psr);
-	p->exit_reason = EXIT_REASON_PTC_G;
-
-	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
-	p->u.ptc_g_data.vaddr = va;
-	p->u.ptc_g_data.ps = ps;
-	vmm_transition(vcpu);
-	/* Do Local Purge Here*/
-	vcpu_ptc_l(vcpu, va, ps);
-	local_irq_restore(psr);
-}
-
-
-void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	vcpu_ptc_ga(vcpu, va, ps);
-}
-
-void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	vcpu_ptc_e(vcpu, ifa);
-}
-
-void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte, slot;
-
-	slot = vcpu_get_gr(vcpu, inst.M45.r3);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
-}
-
-
-
-void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte, slot;
-
-	slot = vcpu_get_gr(vcpu, inst.M45.r3);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte;
-
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte;
-
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
- *************************************/
-
-void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long imm;
-
-	if (inst.M30.s)
-		imm = -inst.M30.imm;
-	else
-		imm = inst.M30.imm;
-
-	vcpu_set_itc(vcpu, imm);
-}
-
-void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r2;
-
-	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
-	vcpu_set_itc(vcpu, r2);
-}
-
-void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1;
-
-	r1 = vcpu_get_itc(vcpu);
-	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
-}
-
-/**************************************************************************
- struct kvm_vcpu protection key register access routines
- **************************************************************************/
-
-unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
-	return ((unsigned long)ia64_get_pkr(reg));
-}
-
-void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
-{
-	ia64_set_pkr(reg, val);
-}
-
-/********************************
- * Moves to privileged registers
- ********************************/
-unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
-		unsigned long val)
-{
-	union ia64_rr oldrr, newrr;
-	unsigned long rrval;
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	unsigned long psr;
-
-	oldrr.val = vcpu_get_rr(vcpu, reg);
-	newrr.val = val;
-	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
-
-	switch ((unsigned long)(reg >> VRN_SHIFT)) {
-	case VRN6:
-		vcpu->arch.vmm_rr = vrrtomrr(val);
-		local_irq_save(psr);
-		p->exit_reason = EXIT_REASON_SWITCH_RR6;
-		vmm_transition(vcpu);
-		local_irq_restore(psr);
-		break;
-	case VRN4:
-		rrval = vrrtomrr(val);
-		vcpu->arch.metaphysical_saved_rr4 = rrval;
-		if (!is_physical_mode(vcpu))
-			ia64_set_rr(reg, rrval);
-		break;
-	case VRN0:
-		rrval = vrrtomrr(val);
-		vcpu->arch.metaphysical_saved_rr0 = rrval;
-		if (!is_physical_mode(vcpu))
-			ia64_set_rr(reg, rrval);
-		break;
-	default:
-		ia64_set_rr(reg, vrrtomrr(val));
-		break;
-	}
-
-	return (IA64_NO_FAULT);
-}
-
-void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_rr(vcpu, r3, r2);
-}
-
-void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pmc(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pmd(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	u64 r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pkr(vcpu, r3, r2);
-}
-
-void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_rr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_pkr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_dbr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_ibr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_pmc(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
-{
-	/* FIXME: This could get called as a result of a rsvd-reg fault */
-	if (reg > (ia64_get_cpuid(3) & 0xff))
-		return 0;
-	else
-		return ia64_get_cpuid(reg);
-}
-
-void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_cpuid(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	VCPU(vcpu, tpr) = val;
-	vcpu->arch.irq_check = 1;
-}
-
-unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r2;
-
-	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
-	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
-
-	switch (inst.M32.cr3) {
-	case 0:
-		vcpu_set_dcr(vcpu, r2);
-		break;
-	case 1:
-		vcpu_set_itm(vcpu, r2);
-		break;
-	case 66:
-		vcpu_set_tpr(vcpu, r2);
-		break;
-	case 67:
-		vcpu_set_eoi(vcpu, r2);
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long tgt = inst.M33.r1;
-	unsigned long val;
-