-rw-r--r--Documentation/arm/cluster-pm-race-avoidance.txt498
-rw-r--r--Documentation/arm/vlocks.txt211
-rw-r--r--Documentation/cpu-freq/cpufreq-arm-bl.txt47
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/rtsm-dcscb.txt19
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/powerpc/transactional_memory.txt27
-rw-r--r--Makefile4
-rw-r--r--arch/arc/include/asm/pgtable.h26
-rw-r--r--arch/arc/include/asm/tlb.h2
-rw-r--r--arch/arc/mm/tlbex.S6
-rw-r--r--arch/arm/Kconfig121
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/compressed/head-sa1100.S1
-rw-r--r--arch/arm/boot/compressed/head-shark.S1
-rw-r--r--arch/arm/boot/compressed/head.S1
-rw-r--r--arch/arm/boot/dts/Makefile9
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi18
-rw-r--r--arch/arm/boot/dts/at91sam9g15.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9g15ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9g35.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x25.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x25ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9x35.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi2
-rw-r--r--arch/arm/boot/dts/clcd-panels.dtsi52
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts159
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts165
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts177
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts171
-rw-r--r--arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts183
-rw-r--r--arch/arm/boot/dts/rtsm_ve-motherboard.dtsi224
-rw-r--r--arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts227
-rw-r--r--arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts335
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi1
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi1
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts6
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts131
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts6
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts4
-rw-r--r--arch/arm/common/Makefile6
-rw-r--r--arch/arm/common/bL_switcher.c783
-rw-r--r--arch/arm/common/bL_switcher_dummy_if.c71
-rw-r--r--arch/arm/common/mcpm_entry.c275
-rw-r--r--arch/arm/common/mcpm_head.S231
-rw-r--r--arch/arm/common/mcpm_platsmp.c89
-rw-r--r--arch/arm/common/vlock.S108
-rw-r--r--arch/arm/common/vlock.h29
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig1
-rw-r--r--arch/arm/configs/vexpress_bL_defconfig157
-rw-r--r--arch/arm/crypto/sha1-armv4-large.S2
-rw-r--r--arch/arm/include/asm/bL_switcher.h69
-rw-r--r--arch/arm/include/asm/cacheflush.h75
-rw-r--r--arch/arm/include/asm/cmpxchg.h8
-rw-r--r--arch/arm/include/asm/cp15.h30
-rw-r--r--arch/arm/include/asm/cputype.h61
-rw-r--r--arch/arm/include/asm/glue-df.h20
-rw-r--r--arch/arm/include/asm/hardirq.h2
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/kvm_arm.h4
-rw-r--r--arch/arm/include/asm/kvm_asm.h2
-rw-r--r--arch/arm/include/asm/kvm_emulate.h107
-rw-r--r--arch/arm/include/asm/kvm_host.h42
-rw-r--r--arch/arm/include/asm/kvm_mmu.h67
-rw-r--r--arch/arm/include/asm/kvm_vgic.h1
-rw-r--r--arch/arm/include/asm/mach/arch.h5
-rw-r--r--arch/arm/include/asm/mach/irq.h36
-rw-r--r--arch/arm/include/asm/mcpm.h217
-rw-r--r--arch/arm/include/asm/percpu.h11
-rw-r--r--arch/arm/include/asm/pgtable.h9
-rw-r--r--arch/arm/include/asm/pmu.h39
-rw-r--r--arch/arm/include/asm/psci.h12
-rw-r--r--arch/arm/include/asm/smp.h2
-rw-r--r--arch/arm/include/asm/topology.h34
-rw-r--r--arch/arm/include/uapi/asm/kvm.h12
-rw-r--r--arch/arm/kernel/asm-offsets.c12
-rw-r--r--arch/arm/kernel/head-common.S9
-rw-r--r--arch/arm/kernel/head-nommu.S8
-rw-r--r--arch/arm/kernel/hw_breakpoint.c3
-rw-r--r--arch/arm/kernel/perf_event.c37
-rw-r--r--arch/arm/kernel/perf_event_cpu.c390
-rw-r--r--arch/arm/kernel/perf_event_v6.c12
-rw-r--r--arch/arm/kernel/perf_event_v7.c465
-rw-r--r--arch/arm/kernel/perf_event_xscale.c20
-rw-r--r--arch/arm/kernel/psci.c42
-rw-r--r--arch/arm/kernel/setup.c22
-rw-r--r--arch/arm/kernel/sleep.S32
-rw-r--r--arch/arm/kernel/smp.c21
-rw-r--r--arch/arm/kernel/smp_scu.c2
-rw-r--r--arch/arm/kernel/topology.c134
-rw-r--r--arch/arm/kvm/Makefile2
-rw-r--r--arch/arm/kvm/arm.c194
-rw-r--r--arch/arm/kvm/coproc.c28
-rw-r--r--arch/arm/kvm/coproc.h4
-rw-r--r--arch/arm/kvm/emulate.c75
-rw-r--r--arch/arm/kvm/guest.c17
-rw-r--r--arch/arm/kvm/handle_exit.c164
-rw-r--r--arch/arm/kvm/interrupts.S13
-rw-r--r--arch/arm/kvm/mmio.c46
-rw-r--r--arch/arm/kvm/mmu.c184
-rw-r--r--arch/arm/kvm/vgic.c2
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c7
-rw-r--r--arch/arm/mach-at91/gpio.c3
-rw-r--r--arch/arm/mach-at91/setup.c2
-rw-r--r--arch/arm/mach-exynos/common.c1
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h1
-rw-r--r--arch/arm/mach-exynos/platsmp.c8
-rw-r--r--arch/arm/mach-exynos/pmu.c5
-rw-r--r--arch/arm/mach-highbank/platsmp.c7
-rw-r--r--arch/arm/mach-imx/platsmp.c12
-rw-r--r--arch/arm/mach-kirkwood/board-ts219.c10
-rw-r--r--arch/arm/mach-kirkwood/ts219-setup.c2
-rw-r--r--arch/arm/mach-msm/platsmp.c8
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c4
-rw-r--r--arch/arm/mach-omap2/cclock33xx_data.c26
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c5
-rw-r--r--arch/arm/mach-omap2/id.c4
-rw-r--r--arch/arm/mach-omap2/omap-smp.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-prima2/platsmp.c8
-rw-r--r--arch/arm/mach-s3c24xx/irq.c1
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c7
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c7
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c7
-rw-r--r--arch/arm/mach-socfpga/platsmp.c12
-rw-r--r--arch/arm/mach-spear13xx/platsmp.c8
-rw-r--r--arch/arm/mach-tegra/platsmp.c8
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h2
-rw-r--r--arch/arm/mach-ux500/platsmp.c8
-rw-r--r--arch/arm/mach-vexpress/Kconfig18
-rw-r--r--arch/arm/mach-vexpress/Makefile8
-rw-r--r--arch/arm/mach-vexpress/core.h2
-rw-r--r--arch/arm/mach-vexpress/dcscb.c256
-rw-r--r--arch/arm/mach-vexpress/dcscb_setup.S80
-rw-r--r--arch/arm/mach-vexpress/include/mach/tc2.h10
-rw-r--r--arch/arm/mach-vexpress/platsmp.c13
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c271
-rw-r--r--arch/arm/mach-vexpress/tc2_pm_psci.c168
-rw-r--r--arch/arm/mach-vexpress/tc2_pm_setup.S102
-rw-r--r--arch/arm/mach-vexpress/v2m.c29
-rw-r--r--arch/arm/mach-virt/platsmp.c8
-rw-r--r--arch/arm/mm/Kconfig9
-rw-r--r--arch/arm/mm/alignment.c2
-rw-r--r--arch/arm/mm/fault.c13
-rw-r--r--arch/arm/mm/mmu.c17
-rw-r--r--arch/arm/mm/proc-v7-2level.S4
-rw-r--r--arch/arm/mm/proc-v7-3level.S4
-rw-r--r--arch/arm/plat-orion/common.c12
-rw-r--r--arch/arm/plat-samsung/irq-vic-timer.c3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-gpioint.c3
-rw-r--r--arch/arm/plat-samsung/setup-mipiphy.c3
-rw-r--r--arch/arm/plat-versatile/platsmp.c8
-rw-r--r--arch/arm/xen/enlighten.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/traps.c12
-rw-r--r--arch/arm64/mm/cache.S2
-rw-r--r--arch/arm64/mm/fault.c3
-rw-r--r--arch/arm64/mm/proc.S3
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/kernel/module.c2
-rw-r--r--arch/ia64/include/asm/futex.h5
-rw-r--r--arch/ia64/include/asm/mca.h1
-rw-r--r--arch/ia64/kernel/irq.c8
-rw-r--r--arch/ia64/kernel/mca.c37
-rw-r--r--arch/ia64/kvm/vtlb.c2
-rw-r--r--arch/m68k/kernel/head.S29
-rw-r--r--arch/parisc/Makefile23
-rw-r--r--arch/parisc/kernel/entry.S172
-rw-r--r--arch/powerpc/include/asm/cputable.h17
-rw-r--r--arch/powerpc/include/asm/machdep.h3
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h4
-rw-r--r--arch/powerpc/include/asm/processor.h13
-rw-r--r--arch/powerpc/include/asm/reg.h11
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/signal.h3
-rw-r--r--arch/powerpc/include/asm/tm.h2
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h18
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S1
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/head_64.S1
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c4
-rw-r--r--arch/powerpc/kernel/pci-common.c21
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/rtas.c113
-rw-r--r--arch/powerpc/kernel/signal.c40
-rw-r--r--arch/powerpc/kernel/signal.h2
-rw-r--r--arch/powerpc/kernel/signal_32.c10
-rw-r--r--arch/powerpc/kernel/signal_64.c23
-rw-r--r--arch/powerpc/kernel/traps.c39
-rw-r--r--arch/powerpc/mm/hash_utils_64.c1
-rw-r--r--arch/powerpc/mm/numa.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c1
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c12
-rw-r--r--arch/powerpc/platforms/pseries/msi.c55
-rw-r--r--arch/powerpc/platforms/pseries/pci.c53
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c22
-rw-r--r--arch/s390/include/asm/pgtable.h18
-rw-r--r--arch/tile/Kconfig14
-rw-r--r--arch/tile/include/hv/hypervisor.h27
-rw-r--r--arch/tile/kernel/head_32.S2
-rw-r--r--arch/tile/kernel/head_64.S12
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/crypto/crc32-pclmul_asm.S2
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S5
-rw-r--r--arch/x86/include/asm/inst.h74
-rw-r--r--arch/x86/include/asm/syscalls.h4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c27
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c17
-rw-r--r--arch/x86/kernel/head64.c3
-rw-r--r--arch/x86/kernel/head_64.S7
-rw-r--r--arch/x86/kernel/i387.c14
-rw-r--r--arch/x86/kernel/irq.c4
-rw-r--r--arch/x86/kernel/microcode_intel_early.c5
-rw-r--r--arch/x86/kernel/vm86_32.c11
-rw-r--r--arch/x86/kvm/emulate.c55
-rw-r--r--arch/x86/kvm/vmx.c6
-rw-r--r--arch/x86/pci/common.c5
-rw-r--r--arch/x86/xen/enlighten.c20
-rw-r--r--arch/x86/xen/smp.c10
-rw-r--r--arch/x86/xen/time.c6
-rw-r--r--block/blk-cgroup.c4
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpica/exfldio.c14
-rw-r--r--drivers/acpi/device_pm.c136
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/thermal.c16
-rw-r--r--drivers/acpi/video.c16
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/ata_piix.c15
-rw-r--r--drivers/ata/libata-acpi.c45
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/ata/sata_rcar.c24
-rw-r--r--drivers/block/brd.c4
-rw-r--r--drivers/block/drbd/drbd_main.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c5
-rw-r--r--drivers/bus/Kconfig5
-rw-r--r--drivers/bus/Makefile2
-rw-r--r--drivers/bus/arm-cci.c510
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c14
-rw-r--r--drivers/char/random.c54
-rw-r--r--drivers/char/tpm/tpm.c31
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/clk/versatile/Makefile2
-rw-r--r--drivers/clk/versatile/clk-vexpress-spc.c131
-rw-r--r--drivers/cpufreq/Kconfig.arm42
-rw-r--r--drivers/cpufreq/Makefile8
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/arm-bl-cpufreq.c270
-rw-r--r--drivers/cpufreq/arm-bl-cpufreq.h37
-rw-r--r--drivers/cpufreq/arm-bl-cpufreq_tests.c652
-rw-r--r--drivers/cpufreq/arm_big_little.c593
-rw-r--r--drivers/cpufreq/arm_big_little.h38
-rw-r--r--drivers/cpufreq/arm_dt_big_little.c101
-rw-r--r--drivers/cpufreq/cpufreq_stats.c49
-rw-r--r--drivers/cpufreq/intel_pstate.c67
-rw-r--r--drivers/cpufreq/vexpress_big_little.c74
-rw-r--r--drivers/cpuidle/Makefile2
-rw-r--r--drivers/cpuidle/arm_big_little.c183
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c14
-rw-r--r--drivers/crypto/caam/caamalg.c4
-rw-r--r--drivers/dma/of-dma.c8
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/ste_dma40.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c12
-rw-r--r--drivers/gpio/gpio-msm-v2.c3
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-omap.c3
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c3
-rw-r--r--drivers/gpio/gpio-tegra.c3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c43
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c38
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c34
-rw-r--r--drivers/gpu/drm/drm_prime.c84
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c4
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c46
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c6
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c84
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c13
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c13
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c7
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c43
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c78
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c1
-rw-r--r--drivers/gpu/drm/radeon/atom.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c135
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/ni.c18
-rw-r--r--drivers/gpu/drm/radeon/nid.h4
-rw-r--r--drivers/gpu/drm/radeon/r100.c86
-rw-r--r--drivers/gpu/drm/radeon/r300.c9
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c10
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c9
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c61
-rw-r--r--drivers/gpu/drm/radeon/rs690.c9
-rw-r--r--drivers/gpu/drm/radeon/rv515.c65
-rw-r--r--drivers/gpu/drm/radeon/rv770.c19
-rw-r--r--drivers/gpu/drm/radeon/si.c19
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hwmon/abituguru.c16
-rw-r--r--drivers/hwmon/adm1021.c58
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c6
-rw-r--r--drivers/iio/frequency/adf4350.c2
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c25
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c32
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/iommu/amd_iommu.c34
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/irqchip/exynos-combiner.c1
-rw-r--r--drivers/irqchip/irq-gic.c183
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/leds/leds-gpio.c9
-rw-r--r--drivers/leds/leds-ot200.c14
-rw-r--r--drivers/md/dm-bufio.c24
-rw-r--r--drivers/md/dm-cache-target.c1
-rw-r--r--drivers/md/dm-snap.c1
-rw-r--r--drivers/md/dm-stripe.c11
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/md.c9
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/mfd/adp5520.c8
-rw-r--r--drivers/mfd/vexpress-config.c35
-rw-r--r--drivers/mfd/vexpress-sysreg.c4
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/vexpress/Kconfig3
-rw-r--r--drivers/misc/vexpress/Makefile1
-rw-r--r--drivers/misc/vexpress/arm-spc.c718
-rw-r--r--drivers/mmc/card/block.c6
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/Kconfig10
-rw-r--r--drivers/mmc/host/atmel-mci.c16
-rw-r--r--drivers/net/can/usb/kvaser_usb.c64
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c27
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c70
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h5
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c6
-rw-r--r--drivers/net/wireless/b43/dma.c19
-rw-r--r--drivers/net/wireless/b43/dma.h4
-rw-r--r--drivers/net/wireless/b43/main.c43
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c16
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c3
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c1
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/xen-netback/netback.c311
-rw-r--r--drivers/net/xen-netfront.c17
-rw-r--r--drivers/ntb/ntb_hw.c10
-rw-r--r--drivers/ntb/ntb_transport.c175
-rw-r--r--drivers/pci/bus.c1
-rw-r--r--drivers/pci/pci.c12
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pinctrl/pinctrl-at91.c3
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/hp_accel.c3
-rw-r--r--drivers/pwm/pwm-spear.c6
-rw-r--r--drivers/rapidio/devices/tsi721.c12
-rw-r--r--drivers/regulator/palmas-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig1
-rw-r--r--drivers/rpmsg/Kconfig1
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c14
-rw-r--r--drivers/rtc/rtc-cmos.c4
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/s390/char/sclp_cmd.c4
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/sd.c20
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c2
-rw-r--r--drivers/staging/vt6656/hostap.c2
-rw-r--r--drivers/staging/vt6656/iwctl.c6
-rw-r--r--drivers/staging/zsmalloc/Kconfig2
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h4
-rw-r--r--drivers/target/target_core_file.c18
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_transport.c29
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/pty.c3
-rw-r--r--drivers/tty/serial/serial_core.c4
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/usb/atm/cxacru.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/chipidea/udc.h4
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c14
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/ehci-sched.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c34
-rw-r--r--drivers/usb/host/uhci-hub.c3
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-mem.c27
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci.c16
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/musb/musb_host.c18
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c18
-rw-r--r--drivers/usb/serial/cypress_m8.h4
-rw-r--r--drivers/usb/serial/ftdi_sio.c53
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/generic.c31
-rw-r--r--drivers/usb/serial/io_ti.c24
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/keyspan.c2
-rw-r--r--drivers/usb/serial/mos7720.c25
-rw-r--r--drivers/usb/serial/mos7840.c35
-rw-r--r--drivers/usb/serial/option.c47
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/usb-serial.c19
-rw-r--r--drivers/usb/serial/visor.c9
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/serial/zte_ev.c58
-rw-r--r--drivers/usb/storage/cypress_atacb.c16
-rw-r--r--drivers/video/Kconfig20
-rw-r--r--drivers/video/Makefile4
-rw-r--r--drivers/video/amba-clcd.c280
-rw-r--r--drivers/video/arm-hdlcd.c839
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/vexpress-dvi.c220
-rw-r--r--drivers/watchdog/watchdog_dev.c3
-rw-r--r--fs/autofs4/expire.c9
-rw-r--r--fs/btrfs/delayed-ref.c24
-rw-r--r--fs/btrfs/inode.c21
-rw-r--r--fs/btrfs/ioctl.c10
-rw-r--r--fs/cifs/cifs_dfs_ref.c4
-rw-r--r--fs/cifs/connect.c4
-rw-r--r--fs/cifs/inode.c3
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/ecryptfs/file.c6
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext4/Kconfig3
-rw-r--r--fs/ext4/ext4_jbd2.h6
-rw-r--r--fs/ext4/fsync.c3
-rw-r--r--fs/ext4/inode.c11
-rw-r--r--fs/ext4/mballoc.c14
-rw-r--r--fs/ext4/mmp.c2
-rw-r--r--fs/ext4/resize.c6
-rw-r--r--fs/ext4/super.c14
-rw-r--r--fs/fat/inode.c15
-rw-r--r--fs/fscache/stats.c2
-rw-r--r--fs/fuse/dir.c12
-rw-r--r--fs/fuse/inode.c7
-rw-r--r--fs/hpfs/file.c4
-rw-r--r--fs/hugetlbfs/inode.c24
-rw-r--r--fs/jbd2/commit.c50
-rw-r--r--fs/jbd2/journal.c31
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_logmgr.c3
-rw-r--r--fs/lockd/clntlock.c3
-rw-r--r--fs/lockd/clntproc.c3
-rw-r--r--fs/namei.c2
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/nfs/nfs4proc.c18
-rw-r--r--fs/nfsd/nfs4proc.c23
-rw-r--r--fs/nfsd/nfs4recover.c12
-rw-r--r--fs/nfsd/nfs4state.c10
-rw-r--r--fs/nfsd/nfs4xdr.c19
-rw-r--r--fs/nilfs2/inode.c27
-rw-r--r--fs/notify/inotify/inotify_user.c6
-rw-r--r--fs/ocfs2/extent_map.c2
-rw-r--r--fs/reiserfs/dir.c2
-rw-r--r--fs/reiserfs/inode.c9
-rw-r--r--fs/reiserfs/xattr.c14
-rw-r--r--fs/reiserfs/xattr_acl.c3
-rw-r--r--fs/sysfs/dir.c15
-rw-r--r--fs/xfs/xfs_iops.c47
-rw-r--r--include/acpi/acpi_bus.h40
-rw-r--r--include/asm-generic/pgtable.h10
-rw-r--r--include/drm/drmP.h5
-rw-r--r--include/drm/drm_pciids.h5
-rw-r--r--include/linux/arm-cci.h30
-rw-r--r--include/linux/arm-hdlcd.h122
-rw-r--r--include/linux/audit.h7
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/cgroup.h5
-rw-r--r--include/linux/hugetlb.h19
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irqchip/arm-gic.h10
-rw-r--r--include/linux/irqchip/chained_irq.h52
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kref.h33
-rw-r--r--include/linux/sched.h30
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/usb/serial.h4
-rw-r--r--include/linux/vexpress.h116
-rw-r--r--include/linux/wait.h16
-rw-r--r--include/net/inet_frag.h5
-rw-r--r--include/net/sock.h12
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/sound/emu10k1.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/power_cpu_migrate.h67
-rw-r--r--include/trace/events/sched.h153
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/if_cablemodem.h12
-rw-r--r--include/uapi/linux/virtio_console.h2
-rw-r--r--include/uapi/linux/virtio_net.h2
-rw-r--r--include/xen/interface/io/netif.h19
-rw-r--r--ipc/shm.c14
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/auditfilter.c31
-rw-r--r--kernel/auditsc.c5
-rw-r--r--kernel/cgroup.c29
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/irqdesc.c21
-rw-r--r--kernel/kmod.c5
-rw-r--r--kernel/module.c13
-rw-r--r--kernel/range.c8
-rw-r--r--kernel/rcutree_trace.c8
-rw-r--r--kernel/sched/core.c4
-rw-r--r--kernel/sched/cputime.c70
-rw-r--r--kernel/sched/debug.c3
-rw-r--r--kernel/sched/fair.c1021
-rw-r--r--kernel/sched/sched.h13
-rw-r--r--kernel/softirq.c13
-rw-r--r--kernel/time/Kconfig5
-rw-r--r--kernel/time/tick-broadcast.c4
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/timekeeping.c8
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/ftrace.c5
-rw-r--r--kernel/trace/trace.c64
-rw-r--r--kernel/trace/trace_events_filter.c4
-rw-r--r--kernel/trace/trace_selftest.c9
-rw-r--r--kernel/trace/trace_stack.c76
-rw-r--r--kernel/trace/trace_stat.c2
-rw-r--r--lib/klist.c2
-rw-r--r--lib/oid_registry.c5
-rw-r--r--linaro/configs/android.conf31
-rw-r--r--linaro/configs/big-LITTLE-IKS.conf5
-rw-r--r--linaro/configs/big-LITTLE-MP.conf13
-rw-r--r--linaro/configs/debug.conf1
-rw-r--r--linaro/configs/linaro-base.conf88
-rw-r--r--linaro/configs/ubuntu-minimal.conf26
-rw-r--r--linaro/configs/ubuntu.conf2129
-rw-r--r--linaro/configs/vexpress-tuning.conf1
-rw-r--r--linaro/configs/vexpress.conf59
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/memcontrol.c14
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c15
-rw-r--r--mm/mmu_notifier.c79
-rw-r--r--mm/page_io.c17
-rw-r--r--mm/pagewalk.c70
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/sock.c12
-rw-r--r--net/ipv4/inet_fragment.c1
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/tcp_ipv6.c12
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/mac80211/cfg.c1
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/mlme.c12
-rw-r--r--net/mac80211/pm.c3
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/tkip.c4
-rw-r--r--net/mac802154/mac802154.h2
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c6
-rw-r--r--net/packet/af_packet.c53
-rw-r--r--net/sched/act_ipt.c33
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/sunrpc/svcauth_unix.c12
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/wireless/core.c16
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/trace.h23
-rw-r--r--scripts/kconfig/list.h13
-rw-r--r--scripts/kconfig/mconf.c3
-rw-r--r--scripts/kconfig/streamline_config.pl17
-rw-r--r--scripts/package/builddeb86
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c39
-rw-r--r--sound/pci/hda/hda_codec.c7
-rw-r--r--sound/pci/hda/hda_generic.c66
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/patch_conexant.c17
-rw-r--r--sound/pci/hda/patch_realtek.c6
-rw-r--r--sound/pci/hda/patch_via.c10
-rw-r--r--sound/soc/codecs/cs42l52.c2
-rw-r--r--sound/soc/codecs/da7213.c8
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c7
-rw-r--r--sound/usb/6fire/pcm.c1
-rw-r--r--sound/usb/caiaq/audio.c3
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/card.h1
-rw-r--r--sound/usb/endpoint.c5
-rw-r--r--sound/usb/midi.c13
-rw-r--r--sound/usb/misc/ua101.c3
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/quirks-table.h14
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--sound/usb/stream.c9
-rw-r--r--sound/usb/usx2y/usb_stream.c1
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c1
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c1
-rw-r--r--tools/perf/config/utilities.mak2
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py8
720 files changed, 21616 insertions(+), 3047 deletions(-)
diff --git a/Documentation/arm/cluster-pm-race-avoidance.txt b/Documentation/arm/cluster-pm-race-avoidance.txt
new file mode 100644
index 00000000000..750b6fc24af
--- /dev/null
+++ b/Documentation/arm/cluster-pm-race-avoidance.txt
@@ -0,0 +1,498 @@
+Cluster-wide Power-up/power-down race avoidance algorithm
+=========================================================
+
+This file documents the algorithm which is used to coordinate CPU and
+cluster setup and teardown operations and to manage hardware coherency
+controls safely.
+
+The section "Rationale" explains what the algorithm is for and why it is
+needed. "Basic model" explains general concepts using a simplified view
+of the system. The other sections explain the actual details of the
+algorithm in use.
+
+
+Rationale
+---------
+
+In a system containing multiple CPUs, it is desirable to have the
+ability to turn off individual CPUs when the system is idle, reducing
+power consumption and thermal dissipation.
+
+In a system containing multiple clusters of CPUs, it is also desirable
+to have the ability to turn off entire clusters.
+
+Turning entire clusters off and on is a risky business, because it
+involves performing potentially destructive operations affecting a group
+of independently running CPUs, while the OS continues to run. This
+means that we need some coordination in order to ensure that critical
+cluster-level operations are only performed when it is truly safe to do
+so.
+
+Simple locking may not be sufficient to solve this problem, because
+mechanisms like Linux spinlocks may rely on coherency mechanisms which
+are not immediately enabled when a cluster powers up. Since enabling or
+disabling those mechanisms may itself be a non-atomic operation (such as
+writing some hardware registers and invalidating large caches), other
+methods of coordination are required in order to guarantee safe
+power-down and power-up at the cluster level.
+
+The mechanism presented in this document describes a coherent memory
+based protocol for performing the needed coordination. It aims to be as
+lightweight as possible, while providing the required safety properties.
+
+
+Basic model
+-----------
+
+Each cluster and CPU is assigned a state, as follows:
+
+ DOWN
+ COMING_UP
+ UP
+ GOING_DOWN
+
+        +---------> UP ----------+
+        |                        v
+
+    COMING_UP               GOING_DOWN
+
+        ^                        |
+        +--------- DOWN <--------+
+
+
+DOWN: The CPU or cluster is not coherent, and is either powered off or
+ suspended, or is ready to be powered off or suspended.
+
+COMING_UP: The CPU or cluster has committed to moving to the UP state.
+ It may be part way through the process of initialisation and
+ enabling coherency.
+
+UP: The CPU or cluster is active and coherent at the hardware
+ level. A CPU in this state is not necessarily being used
+ actively by the kernel.
+
+GOING_DOWN: The CPU or cluster has committed to moving to the DOWN
+ state. It may be part way through the process of teardown and
+ coherency exit.
+
+
+Each CPU has one of these states assigned to it at any point in time.
+The CPU states are described in the "CPU state" section, below.
+
+Each cluster is also assigned a state, but it is necessary to split the
+state value into two parts (the "cluster" state and "inbound" state) and
+to introduce additional states in order to avoid races between different
+CPUs in the cluster simultaneously modifying the state. The cluster-
+level states are described in the "Cluster state" section.
+
+To help distinguish the CPU states from cluster states in this
+discussion, the state names are given a CPU_ prefix for the CPU states,
+and a CLUSTER_ or INBOUND_ prefix for the cluster states.
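+
+For illustration, the ARM implementation encodes each of these states
+as a distinct small integer stored in memory, along the lines of the
+definitions in arch/arm/include/asm/mcpm.h (the exact values shown
+here are a sketch; that header is authoritative):
+
+    #define CPU_DOWN                0x11
+    #define CPU_COMING_UP           0x12
+    #define CPU_UP                  0x13
+    #define CPU_GOING_DOWN          0x14
+
+    #define CLUSTER_DOWN            0x21
+    #define CLUSTER_UP              0x22
+    #define CLUSTER_GOING_DOWN      0x23
+
+    #define INBOUND_NOT_COMING_UP   0x31
+    #define INBOUND_COMING_UP       0x32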
+
+
+CPU state
+---------
+
+In this algorithm, each individual core in a multi-core processor is
+referred to as a "CPU". CPUs are assumed to be single-threaded:
+therefore, a CPU can only be doing one thing at any given point in time.
+
+This means that CPUs fit the basic model closely.
+
+The algorithm defines the following states for each CPU in the system:
+
+ CPU_DOWN
+ CPU_COMING_UP
+ CPU_UP
+ CPU_GOING_DOWN
+
+             cluster setup and
+            CPU setup complete       policy decision
+          +-----------> CPU_UP ------------+
+          |                                v
+
+    CPU_COMING_UP                   CPU_GOING_DOWN
+
+          ^                                |
+          +----------- CPU_DOWN <----------+
+         policy decision        CPU teardown complete
+          or hardware event
+
+
+The definitions of the four states correspond closely to the states of
+the basic model.
+
+Transitions between states occur as follows.
+
+A trigger event (spontaneous) means that the CPU can transition to the
+next state as a result of making local progress only, with no
+requirement for any external event to happen.
+
+
+CPU_DOWN:
+
+ A CPU reaches the CPU_DOWN state when it is ready for
+ power-down. On reaching this state, the CPU will typically
+ power itself down or suspend itself, via a WFI instruction or a
+ firmware call.
+
+ Next state: CPU_COMING_UP
+ Conditions: none
+
+ Trigger events:
+
+ a) an explicit hardware power-up operation, resulting
+ from a policy decision on another CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CPU_COMING_UP:
+
+ A CPU cannot start participating in hardware coherency until the
+ cluster is set up and coherent. If the cluster is not ready,
+ then the CPU will wait in the CPU_COMING_UP state until the
+ cluster has been set up.
+
+ Next state: CPU_UP
+ Conditions: The CPU's parent cluster must be in CLUSTER_UP.
+ Trigger events: Transition of the parent cluster to CLUSTER_UP.
+
+ Refer to the "Cluster state" section for a description of the
+ CLUSTER_UP state.
+
+
+CPU_UP:
+
+ When a CPU reaches the CPU_UP state, it is safe for the CPU to
+ start participating in local coherency.
+
+ This is done by jumping to the kernel's CPU resume code.
+
+ Note that the definition of this state is slightly different
+ from the basic model definition: CPU_UP does not mean that the
+ CPU is coherent yet, but it does mean that it is safe to resume
+ the kernel. The kernel handles the rest of the resume
+ procedure, so the remaining steps are not visible as part of the
+ race avoidance algorithm.
+
+ The CPU remains in this state until an explicit policy decision
+ is made to shut down or suspend the CPU.
+
+ Next state: CPU_GOING_DOWN
+ Conditions: none
+ Trigger events: explicit policy decision
+
+
+CPU_GOING_DOWN:
+
+ While in this state, the CPU exits coherency, including any
+ operations required to achieve this (such as cleaning data
+ caches).
+
+ Next state: CPU_DOWN
+ Conditions: local CPU teardown complete
+ Trigger events: (spontaneous)
+
+
+Cluster state
+-------------
+
+A cluster is a group of connected CPUs with some common resources.
+Because a cluster contains multiple CPUs, it can be doing multiple
+things at the same time. This has some implications. In particular, a
+CPU can start up while another CPU is tearing the cluster down.
+
+In this discussion, the "outbound side" is the view of the cluster state
+as seen by a CPU tearing the cluster down. The "inbound side" is the
+view of the cluster state as seen by a CPU setting the cluster up.
+
+In order to enable safe coordination in such situations, it is important
+that a CPU which is setting up the cluster can advertise its state
+independently of the CPU which is tearing down the cluster. For this
+reason, the cluster state is split into two parts:
+
+ "cluster" state: The global state of the cluster; or the state
+ on the outbound side:
+
+ CLUSTER_DOWN
+ CLUSTER_UP
+ CLUSTER_GOING_DOWN
+
+ "inbound" state: The state of the cluster on the inbound side.
+
+ INBOUND_NOT_COMING_UP
+ INBOUND_COMING_UP
+
+
+ The different pairings of these states result in six possible
+ states for the cluster as a whole:
+
+                 CLUSTER_UP
+      +==========> INBOUND_NOT_COMING_UP -------------+
+      #                                               |
+                                                      |
+  CLUSTER_UP      <----+                              |
+  INBOUND_COMING_UP    |                              v
+
+      ^            CLUSTER_GOING_DOWN        CLUSTER_GOING_DOWN
+      #            INBOUND_COMING_UP   <===  INBOUND_NOT_COMING_UP
+
+  CLUSTER_DOWN         |                              |
+  INBOUND_COMING_UP <--+                              |
+                                                      |
+      ^                                               |
+      +===========    CLUSTER_DOWN  <-----------------+
+                    INBOUND_NOT_COMING_UP
+
+ Transitions -----> can only be made by the outbound CPU, and
+ only involve changes to the "cluster" state.
+
+ Transitions ===##> can only be made by the inbound CPU, and only
+ involve changes to the "inbound" state, except where there is no
+ further transition possible on the outbound side (i.e., the
+ outbound CPU has put the cluster into the CLUSTER_DOWN state).
+
+ The race avoidance algorithm does not provide a way to determine
+ which exact CPUs within the cluster play these roles. This must
+ be decided in advance by some other means. Refer to the section
+ "Last man and first man selection" for more explanation.
+
+
+ CLUSTER_DOWN/INBOUND_NOT_COMING_UP is the only state where the
+ cluster can actually be powered down.
+
+ The parallelism of the inbound and outbound CPUs is observed by
+ the existence of two different paths from CLUSTER_GOING_DOWN/
+ INBOUND_NOT_COMING_UP (corresponding to GOING_DOWN in the basic
+ model) to CLUSTER_DOWN/INBOUND_COMING_UP (corresponding to
+ COMING_UP in the basic model). The second path avoids cluster
+ teardown completely.
+
+ CLUSTER_UP/INBOUND_COMING_UP is equivalent to UP in the basic
+ model. The final transition to CLUSTER_UP/INBOUND_NOT_COMING_UP
+ is trivial and merely resets the state machine ready for the
+ next cycle.
+
+ Details of the allowable transitions follow.
+
+ The next state in each case is notated
+
+ <cluster state>/<inbound state> (<transitioner>)
+
+ where the <transitioner> is the side on which the transition
+ can occur; either the inbound or the outbound side.
+
+
+CLUSTER_DOWN/INBOUND_NOT_COMING_UP:
+
+ Next state: CLUSTER_DOWN/INBOUND_COMING_UP (inbound)
+ Conditions: none
+ Trigger events:
+
+ a) an explicit hardware power-up operation, resulting
+ from a policy decision on another CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CLUSTER_DOWN/INBOUND_COMING_UP:
+
+ In this state, an inbound CPU sets up the cluster, including
+ enabling of hardware coherency at the cluster level and any
+ other operations (such as cache invalidation) which are required
+ in order to achieve this.
+
+ The purpose of this state is to do sufficient cluster-level
+ setup to enable other CPUs in the cluster to enter coherency
+ safely.
+
+ Next state: CLUSTER_UP/INBOUND_COMING_UP (inbound)
+ Conditions: cluster-level setup and hardware coherency complete
+ Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_COMING_UP:
+
+ Cluster-level setup is complete and hardware coherency is
+ enabled for the cluster. Other CPUs in the cluster can safely
+ enter coherency.
+
+ This is a transient state, leading immediately to
+ CLUSTER_UP/INBOUND_NOT_COMING_UP. All other CPUs on the cluster
+ should treat these two states as equivalent.
+
+ Next state: CLUSTER_UP/INBOUND_NOT_COMING_UP (inbound)
+ Conditions: none
+ Trigger events: (spontaneous)
+
+
+CLUSTER_UP/INBOUND_NOT_COMING_UP:
+
+ Cluster-level setup is complete and hardware coherency is
+ enabled for the cluster. Other CPUs in the cluster can safely
+ enter coherency.
+
+ The cluster will remain in this state until a policy decision is
+ made to power the cluster down.
+
+ Next state: CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP (outbound)
+ Conditions: none
+ Trigger events: policy decision to power down the cluster
+
+
+CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP:
+
+ An outbound CPU is tearing the cluster down. The selected CPU
+ must wait in this state until all CPUs in the cluster are in the
+ CPU_DOWN state.
+
+ When all CPUs are in the CPU_DOWN state, the cluster can be torn
+ down, for example by cleaning data caches and exiting
+ cluster-level coherency.
+
+ To avoid wasteful, unnecessary teardown operations, the outbound
+ CPU should check the inbound cluster state for asynchronous
+ transitions to INBOUND_COMING_UP. Alternatively, individual
+ CPUs can be checked for entry into CPU_COMING_UP or CPU_UP.
+
+
+ Next states:
+
+ CLUSTER_DOWN/INBOUND_NOT_COMING_UP (outbound)
+ Conditions: cluster torn down and ready to power off
+ Trigger events: (spontaneous)
+
+ CLUSTER_GOING_DOWN/INBOUND_COMING_UP (inbound)
+ Conditions: none
+ Trigger events:
+
+ a) an explicit hardware power-up operation,
+ resulting from a policy decision on another
+ CPU;
+
+ b) a hardware event, such as an interrupt.
+
+
+CLUSTER_GOING_DOWN/INBOUND_COMING_UP:
+
+ The cluster is (or was) being torn down, but another CPU has
+ come online in the meantime and is trying to set up the cluster
+ again.
+
+ If the outbound CPU observes this state, it has two choices:
+
+ a) back out of teardown, restoring the cluster to the
+ CLUSTER_UP state;
+
+ b) finish tearing the cluster down and put the cluster
+ in the CLUSTER_DOWN state; the inbound CPU will
+ set up the cluster again from there.
+
+ Choice (a) permits the removal of some latency by avoiding
+ unnecessary teardown and setup operations in situations where
+ the cluster is not really going to be powered down.
+
+
+ Next states:
+
+ CLUSTER_UP/INBOUND_COMING_UP (outbound)
+ Conditions: cluster-level setup and hardware
+ coherency complete
+ Trigger events: (spontaneous)
+
+ CLUSTER_DOWN/INBOUND_COMING_UP (outbound)
+ Conditions: cluster torn down and ready to power off
+ Trigger events: (spontaneous)
+
+
+Last man and first man selection
+--------------------------------
+
+The CPU which performs cluster tear-down operations on the outbound side
+is commonly referred to as the "last man".
+
+The CPU which performs cluster setup on the inbound side is commonly
+referred to as the "first man".
+
+The race avoidance algorithm documented above does not provide a
+mechanism to choose which CPUs should play these roles.
+
+
+Last man:
+
+When shutting down the cluster, all the CPUs involved are initially
+executing Linux and hence coherent. Therefore, ordinary spinlocks can
+be used to select a last man safely, before the CPUs become
+non-coherent.
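+
+For illustration, a minimal sketch of such a selection, using a
+hypothetical per-cluster count of online CPUs (not the kernel's
+actual code):
+
+    static DEFINE_SPINLOCK(cluster_lock);
+    static int cluster_cpus_up;     /* CPUs still up in this cluster */
+
+    static bool i_am_last_man(void)
+    {
+        bool last;
+
+        /* all CPUs involved are still coherent, so a spinlock is safe */
+        spin_lock(&cluster_lock);
+        last = (--cluster_cpus_up == 0);
+        spin_unlock(&cluster_lock);
+
+        return last;    /* true for exactly one CPU per power-down */
+    }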
+
+
+First man:
+
+Because CPUs may power up asynchronously in response to external wake-up
+events, a dynamic mechanism is needed to make sure that only one CPU
+attempts to play the first man role and do the cluster-level
+initialisation: any other CPUs must wait for this to complete before
+proceeding.
+
+Cluster-level initialisation may involve actions such as configuring
+coherency controls in the bus fabric.
+
+The current implementation in mcpm_head.S uses a separate mutual exclusion
+mechanism to do this arbitration. This mechanism is documented in
+detail in vlocks.txt.
+
+
+Features and Limitations
+------------------------
+
+Implementation:
+
+ The current ARM-based implementation is split between
+ arch/arm/common/mcpm_head.S (low-level inbound CPU operations) and
+ arch/arm/common/mcpm_entry.c (everything else):
+
+ __mcpm_cpu_going_down() signals the transition of a CPU to the
+ CPU_GOING_DOWN state.
+
+ __mcpm_cpu_down() signals the transition of a CPU to the CPU_DOWN
+ state.
+
+ A CPU transitions to CPU_COMING_UP and then to CPU_UP via the
+ low-level power-up code in mcpm_head.S. This could
+ involve CPU-specific setup code, but in the current
+ implementation it does not.
+
+ __mcpm_outbound_enter_critical() and __mcpm_outbound_leave_critical()
+ handle transitions from CLUSTER_UP to CLUSTER_GOING_DOWN
+ and from there to CLUSTER_DOWN or back to CLUSTER_UP (in
+ the case of an aborted cluster power-down).
+
+ These functions are more complex than the __mcpm_cpu_*()
+ functions due to the extra inter-CPU coordination which
+ is needed for safe transitions at the cluster level.
+
+ A cluster transitions from CLUSTER_DOWN back to CLUSTER_UP via
+ the low-level power-up code in mcpm_head.S. This
+ typically involves platform-specific setup code,
+ provided by the platform-specific power_up_setup
+ function registered via mcpm_sync_init.
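+
+ As a rough sketch, a platform's CPU power-down path might order
+ these calls as follows (hypothetical example: all cache and
+ coherency maintenance is elided; real backends such as the
+ vexpress DCSCB code must handle it precisely):
+
+     static void example_power_down(unsigned int cpu, unsigned int cluster)
+     {
+         __mcpm_cpu_going_down(cpu, cluster);
+
+         if (__mcpm_outbound_enter_critical(cpu, cluster)) {
+             /* last man: tear down the whole cluster */
+             /* ... clean all cache levels, exit cluster coherency ... */
+             __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+         } else {
+             /* not last man: tear down this CPU only */
+             /* ... clean own cache level, exit local coherency ... */
+         }
+
+         __mcpm_cpu_down(cpu, cluster);
+         /* now in CPU_DOWN: safe to WFI or power off */
+     }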
+
+Deep topologies:
+
+ As currently described and implemented, the algorithm does not
+ support CPU topologies involving more than two levels (i.e.,
+ clusters of clusters are not supported). The algorithm could be
+ extended by replicating the cluster-level states for the
+ additional topological levels, and modifying the transition
+ rules for the intermediate (non-outermost) cluster levels.
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, in
+collaboration with Nicolas Pitre and Achin Gupta.
+
+Copyright (C) 2012-2013 Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
diff --git a/Documentation/arm/vlocks.txt b/Documentation/arm/vlocks.txt
new file mode 100644
index 00000000000..415960a9bab
--- /dev/null
+++ b/Documentation/arm/vlocks.txt
@@ -0,0 +1,211 @@
+vlocks for Bare-Metal Mutual Exclusion
+======================================
+
+Voting Locks, or "vlocks", provide a simple low-level mutual exclusion
+mechanism, with reasonable but minimal requirements on the memory
+system.
+
+These are intended to be used to coordinate critical activity among CPUs
+which are otherwise non-coherent, in situations where the hardware
+provides no other mechanism to support this and ordinary spinlocks
+cannot be used.
+
+
+vlocks make use of the atomicity provided by the memory system for
+writes to a single memory location. To arbitrate, every CPU "votes for
+itself", by storing a unique number to a common memory location. The
+final value seen in that memory location when all the votes have been
+cast identifies the winner.
+
+In order to make sure that the election produces an unambiguous result
+in finite time, a CPU will only enter the election in the first place if
+no winner has been chosen and the election does not appear to have
+started yet.
+
+
+Algorithm
+---------
+
+The easiest way to explain the vlocks algorithm is with some pseudo-code:
+
+
+    int currently_voting[NR_CPUS] = { 0, };
+    int last_vote = -1; /* no votes yet */
+
+    bool vlock_trylock(int this_cpu)
+    {
+        /* signal our desire to vote */
+        currently_voting[this_cpu] = 1;
+        if (last_vote != -1) {
+            /* someone already volunteered himself */
+            currently_voting[this_cpu] = 0;
+            return false; /* not ourself */
+        }
+
+        /* let's suggest ourself */
+        last_vote = this_cpu;
+        currently_voting[this_cpu] = 0;
+
+        /* then wait until everyone else is done voting */
+        for_each_cpu(i) {
+            while (currently_voting[i] != 0)
+                /* wait */;
+        }
+
+        /* result */
+        if (last_vote == this_cpu)
+            return true; /* we won */
+        return false;
+    }
+
+    void vlock_unlock(void)
+    {
+        last_vote = -1;
+    }
+
+
+The currently_voting[] array provides a way for the CPUs to determine
+whether an election is in progress, and plays a role analogous to the
+"entering" array in Lamport's bakery algorithm [1].
+
+However, once the election has started, the underlying memory system
+atomicity is used to pick the winner. This avoids the need for a static
+priority rule to act as a tie-breaker, or any counters which could
+overflow.
+
+As long as the last_vote variable is globally visible to all CPUs, it
+will contain only one value that won't change once every CPU has cleared
+its currently_voting flag.
+
+
+Features and limitations
+------------------------
+
+ * vlocks are not intended to be fair. In the contended case, it is the
+ _last_ CPU which attempts to get the lock which will be most likely
+ to win.
+
+ vlocks are therefore best suited to situations where it is necessary
+ to pick a unique winner, but it does not matter which CPU actually
+ wins.
+
+ * Like other similar mechanisms, vlocks will not scale well to a large
+ number of CPUs.
+
+ vlocks can be cascaded in a voting hierarchy to permit better scaling
+ if necessary, as in the following hypothetical example for 4096 CPUs:
+
+      /* first level: local election */
+      my_town = towns[(this_cpu >> 4) & 0xf];
+      I_won = vlock_trylock(my_town, this_cpu & 0xf);
+      if (I_won) {
+          /* we won the town election, let's go for the state */
+          my_state = states[(this_cpu >> 8) & 0xf];
+          I_won = vlock_trylock(my_state, this_cpu & 0xf);
+          if (I_won) {
+              /* and so on */
+              I_won = vlock_trylock(the_whole_country, this_cpu & 0xf);
+              if (I_won) {
+                  /* ... */
+              }
+              vlock_unlock(the_whole_country);
+          }
+          vlock_unlock(my_state);
+      }
+      vlock_unlock(my_town);
+
+
+ARM implementation
+------------------
+
+The current ARM implementation [2] contains some optimisations beyond
+the basic algorithm:
+
+ * By packing the members of the currently_voting array close together,
+ we can read the whole array in one transaction (providing the number
+ of CPUs potentially contending the lock is small enough). This
+ reduces the number of round-trips required to external memory.
+
+ In the ARM implementation, this means that we can use a single load
+ and comparison:
+
+ LDR Rt, [Rn]
+ CMP Rt, #0
+
+ ...in place of code equivalent to:
+
+ LDRB Rt, [Rn]
+ CMP Rt, #0
+ LDRBEQ Rt, [Rn, #1]
+ CMPEQ Rt, #0
+ LDRBEQ Rt, [Rn, #2]
+ CMPEQ Rt, #0
+ LDRBEQ Rt, [Rn, #3]
+ CMPEQ Rt, #0
+
+ This cuts down on the fast-path latency, as well as potentially
+ reducing bus contention in contended cases.
+
+ The optimisation relies on the fact that the ARM memory system
+ guarantees coherency between overlapping memory accesses of
+ different sizes, similarly to many other architectures. Note that
+ we do not care which element of currently_voting appears in which
+ bits of Rt, so there is no need to worry about endianness in this
+ optimisation.
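+
+ In C terms, the optimised check amounts to something like the
+ following sketch (assuming four CPUs, with the flags stored in
+ four adjacent, suitably aligned bytes):
+
+     /* one 32-bit load checks all four currently_voting bytes */
+     if (*(volatile const unsigned int *)currently_voting == 0)
+         /* nobody is currently voting */;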
+
+ If there are too many CPUs to read the currently_voting array in
+ one transaction, then multiple transactions are still required. The
+ implementation uses a simple loop of word-sized loads for this
+ case. The number of transactions is still fewer than would be
+ required if bytes were loaded individually.
+
+
+ In principle, we could aggregate further by using LDRD or LDM, but
+ to keep the code simple this was not attempted in the initial
+ implementation.
+
+
+ * vlocks are currently only used to coordinate between CPUs which are
+ unable to enable their caches yet. This means that the
+ implementation removes many of the barriers which would be required
+ when executing the algorithm in cached memory.
+
+ Packing of the currently_voting array does not work with cached
+ memory unless all CPUs contending the lock are cache-coherent, due
+ to cache writebacks from one CPU clobbering values written by other
+ CPUs. (Though if all the CPUs are cache-coherent, you should
+ probably be using proper spinlocks instead anyway.)
+
+
+ * The "no votes yet" value used for the last_vote variable is 0 (not
+ -1 as in the pseudocode). This allows statically-allocated vlocks
+ to be implicitly initialised to an unlocked state simply by putting
+ them in .bss.
+
+ An offset is added to each CPU's ID for the purpose of setting this
+ variable, so that no CPU uses the value 0 for its ID.
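+
+ In terms of the earlier pseudocode, this amounts to voting with an
+ offset CPU ID (the offset of 1 here is illustrative):
+
+     #define VLOCK_NO_VOTE  0
+
+     last_vote = this_cpu + 1;  /* vote values are never 0 */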
+
+
+Colophon
+--------
+
+Originally created and documented by Dave Martin for Linaro Limited, for
+use in ARM-based big.LITTLE platforms, with review and input gratefully
+received from Nicolas Pitre and Achin Gupta. Thanks to Nicolas for
+grabbing most of this text out of the relevant mail thread and writing
+up the pseudocode.
+
+Copyright (C) 2012-2013 Linaro Limited
+Distributed under the terms of Version 2 of the GNU General Public
+License, as defined in linux/COPYING.
+
+
+References
+----------
+
+[1] Lamport, L. "A New Solution of Dijkstra's Concurrent Programming
+ Problem", Communications of the ACM 17, 8 (August 1974), 453-455.
+
+ http://en.wikipedia.org/wiki/Lamport%27s_bakery_algorithm
+
+[2] linux/arch/arm/common/vlock.S, www.kernel.org.
diff --git a/Documentation/cpu-freq/cpufreq-arm-bl.txt b/Documentation/cpu-freq/cpufreq-arm-bl.txt
new file mode 100644
index 00000000000..52e2f3ad761
--- /dev/null
+++ b/Documentation/cpu-freq/cpufreq-arm-bl.txt
@@ -0,0 +1,47 @@
+Synchronous cluster switching interface for the ARM big.LITTLE switcher
+-----------------------------------------------------------------------
+
+The arm-bl-cpufreq driver provides a simple interface which models two
+clusters as two performance points.
+
+Within each CPU's cpufreq directory in sysfs
+(/sys/devices/system/cpu/cpu?/cpufreq/):
+
+cpuinfo_max_freq:
+
+ reports the dummy frequency value which corresponds to the "big"
+ cluster.
+
+cpuinfo_min_freq:
+
+ reports the dummy frequency value which corresponds to the
+ "little" cluster.
+
+cpuinfo_cur_freq:
+
+ reports the dummy frequency corresponding to the currently
+ running cluster.
+
+
+To switch clusters, either the built-in "powersave" or "performance"
+governor can be used to force the "little" or "big" cluster
+respectively; or, alternatively, the "userspace" governor can be used.
+
+The following script fragment demonstrates how the userspace governor
+can be used to switch:
+
+
+for x in /sys/devices/system/cpu/cpu[0-9]*; do
+ echo userspace >$x/cpufreq/scaling_governor
+done
+
+big_freq=`cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq`
+little_freq=`cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq`
+
+switch_to_big () {
+ echo $big_freq >/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
+}
+
+switch_to_little () {
+ echo $little_freq >/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
+}
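+
+For example, after defining the functions above, a switch to the "big"
+cluster can be requested and verified as follows (hypothetical session;
+cpuinfo_cur_freq should then report the same value as cpuinfo_max_freq):
+
+switch_to_big
+cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq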
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 343781b9f24..4ce82d045a6 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -16,6 +16,9 @@ Required properties:
"arm,arm1176-pmu"
"arm,arm1136-pmu"
- interrupts : 1 combined interrupt or 1 per core.
+- cluster : a phandle to the cluster to which it belongs
+ If there is more than one cluster with the same CPU type,
+ then there should be a separate PMU node per cluster.
Example:
diff --git a/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt b/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
new file mode 100644
index 00000000000..3b8fbf3c00c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
@@ -0,0 +1,19 @@
+ARM Dual Cluster System Configuration Block
+-------------------------------------------
+
+The Dual Cluster System Configuration Block (DCSCB) provides basic
+functionality for controlling clocks, resets and configuration pins in
+the Dual Cluster System implemented by the Real-Time System Model (RTSM).
+
+Required properties:
+
+- compatible : should be "arm,rtsm,dcscb"
+
+- reg : physical base address and the size of the registers window
+
+Example:
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0x60000000 0x1000>;
+ };
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8ccbf27aead..69e11cfa94e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1212,6 +1212,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See comment before ip2_setup() in
drivers/char/ip2/ip2base.c.
+ irqaffinity= [SMP] Set the default irq affinity mask
+ Format:
+ <cpu number>,...,<cpu number>
+ or
+ <cpu number>-<cpu number>
+ (must be a positive range in ascending order)
+ or a mixture
+ <cpu number>,...,<cpu number>-<cpu number>
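+
+ For example, "irqaffinity=0,3-4" restricts the
+ default irq affinity mask to CPUs 0, 3 and 4.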
+
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index c907be41d60..dc23e58ae26 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -147,6 +147,25 @@ Example signal handler:
fix_the_problem(ucp->dar);
}
+When an active transaction takes a signal, we need to be careful with
+the stack. It's possible that the stack has moved back up after the tbegin.
+The obvious case here is when the tbegin is called inside a function that
+returns before a tend. In this case, the stack is part of the checkpointed
+transactional memory state. If we write over this non-transactionally or in
+suspend, we are in trouble because if we get a TM abort, the program counter
+and stack pointer will be back at the tbegin, but our in-memory stack won't
+be valid anymore.
+
+To avoid this, when taking a signal in an active transaction, we need to use
+the stack pointer from the checkpointed state, rather than the speculated
+state. This ensures that the signal context (written while TM suspended) will
+be written below the stack required for the rollback. The transaction is
+aborted because of the treclaim, so any memory written between the tbegin and
+the signal will be rolled back anyway.
+
+For signals taken in non-TM or suspended mode, we use the
+normal/non-checkpointed stack pointer.
+
Failure cause codes used by kernel
==================================
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
kernel aborted a transaction:
TM_CAUSE_RESCHED Thread was rescheduled.
+ TM_CAUSE_TLBI Software TLB invalidate.
TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
transactions for consistency will use this.
TM_CAUSE_SIGNAL Signal delivered.
TM_CAUSE_MISC Currently unused.
+ TM_CAUSE_ALIGNMENT Alignment fault.
+ TM_CAUSE_EMULATE Emulation that touched memory.
-These can be checked by the user program's abort handler as TEXASR[0:7].
-
+These can be checked by the user program's abort handler as TEXASR[0:7]. If
+bit 7 is set, it indicates that the failure is considered persistent. For
+example, a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED
+will not.
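+
+An abort handler might use this to retry only transient failures. A
+minimal sketch, assuming GCC's HTM builtins (give_up() and retry() are
+hypothetical application hooks):
+
+  unsigned long texasr = __builtin_get_texasr();
+  unsigned char cause = texasr >> 56;	/* TEXASR[0:7] */
+
+  if (cause & 0x1)	/* bit 7 set: failure is persistent */
+          give_up();
+  else
+          retry();
+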
GDB
===
diff --git a/Makefile b/Makefile
index 8fe69916e72..4a4030737be 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 3
PATCHLEVEL = 9
-SUBLEVEL = 0
+SUBLEVEL = 6
EXTRAVERSION =
-NAME = Unicycling Gorilla
+NAME = Black Squirrel Wakeup Call
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c09..8ca472c2f5c 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -57,9 +57,9 @@
#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
-#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
-#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
-#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
@@ -72,9 +72,9 @@
/* PD1 */
#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
-#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
-#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
-#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
@@ -93,7 +93,8 @@
#endif
/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+ _PAGE_GLOBAL | _PAGE_PRESENT)
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
@@ -106,7 +107,11 @@
* -by default cached, unless config otherwise
* -present in memory
*/
-#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+
+#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ)
+#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE)
+#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
@@ -125,11 +130,10 @@
* kernel vaddr space - visible in all addr spaces, but kernel mode only
* Thus Global, all-kernel-access, no-user-access, cached
*/
-#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
/* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
- _PAGE_GLOBAL)
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index 3eb2ce0bdfa..5e0ee3003e2 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -16,7 +16,7 @@
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
- _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
#ifndef __ASSEMBLY__
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 9df765dc7c3..3357d26ffe5 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI
;----------------------------------------------------------------
; VERIFY_PTE: Check if PTE permissions approp for executing code
cmp_s r2, VMALLOC_START
- mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
and r3, r0, r2 ; Mask out NON Flag bits from PTE
@@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD
mov_s r2, 0
lr r3, [ecr]
btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
- or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
- or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE
; Above laddering takes care of XCHG access
; which is both Read and Write
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8468902333a..366658cf7cb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1588,6 +1588,91 @@ config SCHED_SMT
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+ bool "(EXPERIMENTAL) Heterogenous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ default y
+ help
+ Enables a task-priority-based HMP migration filter. Any task with
+ a nice value above the threshold will always run on the low-power
+ cpus, which have less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+ e.g. cpuids 0 and 1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+ e.g. cpuids 0 and 1 should be specified as 0-1.
+
+config HMP_VARIABLE_SCALE
+ bool "Allows changing the load tracking scale through sysfs"
+ depends on SCHED_HMP
+ help
+ When turned on, this option exports the thresholds and load average
+ period value for the load tracking patches through sysfs.
+ The values can be modified to change the rate of load accumulation
+ and the thresholds used for HMP migration.
+ The load_avg_period_ms is the time in ms to reach a load average of
+ 0.5 for an idle task of 0 load average ratio that starts a busy loop.
+ The up_threshold and down_threshold are the values used to decide
+ whether to move to a faster CPU or back to a slower one.
+ The {up,down}_threshold are divided by 1024 before being compared
+ to the load average.
+ For example, with load_avg_period_ms = 128 and up_threshold = 512,
+ a running task with a load of 0 will be migrated to a bigger CPU after
+ 128ms, because after 128ms its load_avg_ratio is 0.5 and the real
+ up_threshold is 0.5.
+ This patch has the same behavior as changing the Y of the load
+ average computation to
+ (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+ but it removes intermediate overflows in the computation.
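+ For example, the values can then be tuned at run time through sysfs
+ (the paths below assume the layout created by this option):
+ echo 256 > /sys/kernel/hmp/load_avg_period_ms
+ echo 700 > /sys/kernel/hmp/up_threshold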
+
+config HMP_FREQUENCY_INVARIANT_SCALE
+ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
+ depends on HMP_VARIABLE_SCALE && CPU_FREQ
+ help
+ Scales the current load contribution in line with the frequency
+ of the CPU that the task was executed on.
+ In this version, we use a simple linear scale derived from the
+ maximum frequency reported by CPUFreq.
+ Restricting tracked load to be scaled by the CPU's frequency
+ represents the consumption of possible compute capacity
+ (rather than consumption of actual instantaneous capacity as
+ normal) and allows the HMP migration's simple threshold
+ migration strategy to interact more predictably with CPUFreq's
+ asynchronous compute capacity changes.
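+ As a sketch, the scaled contribution is effectively
+ contrib' = contrib * curr_freq / max_freq
+ where max_freq is the maximum frequency reported by CPUFreq.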
+
config HAVE_ARM_SCU
bool
help
@@ -1606,6 +1691,39 @@ config HAVE_ARM_TWD
help
This options enables support for the ARM timer and watchdog unit
+config MCPM
+ bool "Multi-Cluster Power Management"
+ depends on CPU_V7 && SMP
+ help
+ This option provides the common power management infrastructure
+ for (multi-)cluster based systems, such as big.LITTLE based
+ systems.
+
+config BIG_LITTLE
+ bool "big.LITTLE support (Experimental)"
+ depends on CPU_V7 && SMP
+ select MCPM
+ help
+ This option enables support for the big.LITTLE architecture.
+
+config BL_SWITCHER
+ bool "big.LITTLE switcher support"
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+ select CPU_PM
+ select ARM_CPU_SUSPEND
+ help
+ The big.LITTLE "switcher" provides the core functionality to
+ transparently handle transition between a cluster of A15's
+ and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+ tristate "Simple big.LITTLE switcher user interface"
+ depends on BL_SWITCHER && DEBUG_KERNEL
+ help
+ This is a simple dummy char dev interface to control the
+ big.LITTLE switcher core code. It is meant for debugging
+ purposes only.
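+ For example, assuming the two-character <cpu><cluster> command
+ format implemented by the driver, CPU 0 can be moved to cluster 1
+ with:
+ echo 01 > /dev/b.L_switcher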
+
choice
prompt "Memory split"
default VMSPLIT_3G
@@ -1693,8 +1811,9 @@ config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
config THUMB2_KERNEL
- bool "Compile the kernel in Thumb-2 mode"
+ bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+ default y if CPU_THUMBONLY
select AEABI
select ARM_ASM_UNIFIED
select ARM_UNWIND
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index afed28e37ea..27f36042134 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -121,7 +121,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index 6179d94dd5c..3115e313d9f 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -11,6 +11,7 @@
#include <asm/mach-types.h>
.section ".start", "ax"
+ .arch armv4
__SA1100_start:
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
index 089c560e07f..92b56897ed6 100644
--- a/arch/arm/boot/compressed/head-shark.S
+++ b/arch/arm/boot/compressed/head-shark.S
@@ -18,6 +18,7 @@
.section ".start", "ax"
+ .arch armv4
b __beginning
__ofw_data: .long 0 @ the number of memory blocks
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 9a94f344df4..2581642d64e 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+ .arch armv7-a
/*
* Debugging stuff
*
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 9c6255884cb..2fc92bf10cc 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -169,7 +169,14 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
vexpress-v2p-ca9.dtb \
vexpress-v2p-ca15-tc1.dtb \
vexpress-v2p-ca15_a7.dtb \
- xenvm-4.2.dtb
+ xenvm-4.2.dtb \
+ rtsm_ve-cortex_a9x2.dtb \
+ rtsm_ve-cortex_a9x4.dtb \
+ rtsm_ve-cortex_a15x1.dtb \
+ rtsm_ve-cortex_a15x2.dtb \
+ rtsm_ve-cortex_a15x4.dtb \
+ rtsm_ve-v2p-ca15x1-ca7x1.dtb \
+ rtsm_ve-v2p-ca15x4-ca7x4.dtb
dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
wm8505-ref.dtb \
wm8650-mid.dtb \
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb7bcc51608..755a61e8ca8 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -158,8 +158,8 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
- <2 6 0x1 0x1 /* PB6 periph A with pullup */
- 2 7 0x1 0x0>; /* PB7 periph A */
+ <1 6 0x1 0x1 /* PB6 periph A with pullup */
+ 1 7 0x1 0x0>; /* PB7 periph A */
};
pinctrl_usart1_rts: usart1_rts-0 {
@@ -194,18 +194,18 @@
usart3 {
pinctrl_usart3: usart3-0 {
atmel,pins =
- <2 10 0x1 0x1 /* PB10 periph A with pullup */
- 2 11 0x1 0x0>; /* PB11 periph A */
+ <1 10 0x1 0x1 /* PB10 periph A with pullup */
+ 1 11 0x1 0x0>; /* PB11 periph A */
};
pinctrl_usart3_rts: usart3_rts-0 {
atmel,pins =
- <3 8 0x2 0x0>; /* PB8 periph B */
+ <2 8 0x2 0x0>; /* PC8 periph B */
};
pinctrl_usart3_cts: usart3_cts-0 {
atmel,pins =
- <3 10 0x2 0x0>; /* PB10 periph B */
+ <2 10 0x2 0x0>; /* PC10 periph B */
};
};
@@ -220,8 +220,8 @@
uart1 {
pinctrl_uart1: uart1-0 {
atmel,pins =
- <2 12 0x1 0x1 /* PB12 periph A with pullup */
- 2 13 0x1 0x0>; /* PB13 periph A */
+ <1 12 0x1 0x1 /* PB12 periph A with pullup */
+ 1 13 0x1 0x0>; /* PB13 periph A */
};
};
@@ -264,7 +264,7 @@
atmel,pins =
<0 10 0x2 0x0 /* PA10 periph B */
0 11 0x2 0x0 /* PA11 periph B */
- 0 24 0x2 0x0 /* PA24 periph B */
+ 0 22 0x2 0x0 /* PA22 periph B */
0 25 0x2 0x0 /* PA25 periph B */
0 26 0x2 0x0 /* PA26 periph B */
0 27 0x2 0x0 /* PA27 periph B */
diff --git a/arch/arm/boot/dts/at91sam9g15.dtsi b/arch/arm/boot/dts/at91sam9g15.dtsi
index fbe7a7089c2..28467fd6bf9 100644
--- a/arch/arm/boot/dts/at91sam9g15.dtsi
+++ b/arch/arm/boot/dts/at91sam9g15.dtsi
@@ -10,7 +10,7 @@
/ {
model = "Atmel AT91SAM9G15 SoC";
- compatible = "atmel, at91sam9g15, atmel,at91sam9x5";
+ compatible = "atmel,at91sam9g15", "atmel,at91sam9x5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/at91sam9g15ek.dts b/arch/arm/boot/dts/at91sam9g15ek.dts
index 86dd3f6d938..5427b2dba87 100644
--- a/arch/arm/boot/dts/at91sam9g15ek.dts
+++ b/arch/arm/boot/dts/at91sam9g15ek.dts
@@ -11,6 +11,6 @@
/include/ "at91sam9x5ek.dtsi"
/ {
- model = "Atmel AT91SAM9G25-EK";
+ model = "Atmel AT91SAM9G15-EK";
compatible = "atmel,at91sam9g15ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
};
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index 05a718fb83c..5fd32df03f2 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -10,7 +10,7 @@
/ {
model = "Atmel AT91SAM9G25 SoC";
- compatible = "atmel, at91sam9g25, atmel,at91sam9x5";
+ compatible = "atmel,at91sam9g25", "atmel,at91sam9x5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/at91sam9g35.dtsi b/arch/arm/boot/dts/at91sam9g35.dtsi
index f9d14a72279..d6fa8af5072 100644
--- a/arch/arm/boot/dts/at91sam9g35.dtsi
+++ b/arch/arm/boot/dts/at91sam9g35.dtsi
@@ -10,7 +10,7 @@
/ {
model = "Atmel AT91SAM9G35 SoC";
- compatible = "atmel, at91sam9g35, atmel,at91sam9x5";
+ compatible = "atmel,at91sam9g35", "atmel,at91sam9x5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/at91sam9x25.dtsi b/arch/arm/boot/dts/at91sam9x25.dtsi
index 54eb33ba6d2..9ac2bc2b4f0 100644
--- a/arch/arm/boot/dts/at91sam9x25.dtsi
+++ b/arch/arm/boot/dts/at91sam9x25.dtsi
@@ -10,7 +10,7 @@
/ {
model = "Atmel AT91SAM9X25 SoC";
- compatible = "atmel, at91sam9x25, atmel,at91sam9x5";
+ compatible = "atmel,at91sam9x25", "atmel,at91sam9x5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/at91sam9x25ek.dts b/arch/arm/boot/dts/at91sam9x25ek.dts
index af907eaa1f2..80015b0d4c1 100644
--- a/arch/arm/boot/dts/at91sam9x25ek.dts
+++ b/arch/arm/boot/dts/at91sam9x25ek.dts
@@ -11,6 +11,6 @@
/include/ "at91sam9x5ek.dtsi"
/ {
- model = "Atmel AT91SAM9G25-EK";
+ model = "Atmel AT91SAM9X25-EK";
compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
};
diff --git a/arch/arm/boot/dts/at91sam9x35.dtsi b/arch/arm/boot/dts/at91sam9x35.dtsi
index fb102d6126c..ba67d83d17a 100644
--- a/arch/arm/boot/dts/at91sam9x35.dtsi
+++ b/arch/arm/boot/dts/at91sam9x35.dtsi
@@ -10,7 +10,7 @@
/ {
model = "Atmel AT91SAM9X35 SoC";
- compatible = "atmel, at91sam9x35, atmel,at91sam9x5";
+ compatible = "atmel,at91sam9x35", "atmel,at91sam9x5";
ahb {
apb {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 8a7cf1d9cf5..ccab2568b0d 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -13,7 +13,7 @@
compatible = "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
chosen {
- bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
};
ahb {
diff --git a/arch/arm/boot/dts/clcd-panels.dtsi b/arch/arm/boot/dts/clcd-panels.dtsi
new file mode 100644
index 00000000000..0b0ff6ead4b
--- /dev/null
+++ b/arch/arm/boot/dts/clcd-panels.dtsi
@@ -0,0 +1,52 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ */
+
+/ {
+ panels {
+ panel@0 {
+ compatible = "panel";
+ mode = "VGA";
+ refresh = <60>;
+ xres = <640>;
+ yres = <480>;
+ pixclock = <39721>;
+ left_margin = <40>;
+ right_margin = <24>;
+ upper_margin = <32>;
+ lower_margin = <11>;
+ hsync_len = <96>;
+ vsync_len = <2>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+
+ panel@1 {
+ compatible = "panel";
+ mode = "XVGA";
+ refresh = <60>;
+ xres = <1024>;
+ yres = <768>;
+ pixclock = <15748>;
+ left_margin = <152>;
+ right_margin = <48>;
+ upper_margin = <23>;
+ lower_margin = <3>;
+ hsync_len = <104>;
+ vsync_len = <4>;
+ sync = <0>;
+ vmode = "FB_VMODE_NONINTERLACED";
+
+ tim2 = "TIM2_BCD", "TIM2_IPC";
+ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
+ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
+ bpp = <16>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
new file mode 100644
index 00000000000..c9eee916aa7
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
@@ -0,0 +1,159 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x1CT
+ *
+ * RTSM_VE_Cortex_A15x1.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x1";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x1", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
new file mode 100644
index 00000000000..853a166e3c3
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
@@ -0,0 +1,165 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x2CT
+ *
+ * RTSM_VE_Cortex_A15x2.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x2";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x2", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
new file mode 100644
index 00000000000..c1947a3a5c8
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
@@ -0,0 +1,177 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ *
+ * RTSM_VE_Cortex_A15x4.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA15x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
new file mode 100644
index 00000000000..fca6b2f7967
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
@@ -0,0 +1,171 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA9MPx2CT
+ *
+ * RTSM_VE_Cortex_A9x2.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA9x2";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a9x2", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ scu@2c000000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0x2c000000 0x58>;
+ };
+
+ timer@2c000600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0xf04>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c000100 0x100>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x04000000>,
+ <1 0 0x14000000 0x04000000>,
+ <2 0 0x18000000 0x04000000>,
+ <3 0 0x1c000000 0x04000000>,
+ <4 0 0x0c000000 0x04000000>,
+ <5 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts b/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
new file mode 100644
index 00000000000..fd8a6ed97a0
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
@@ -0,0 +1,183 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA9MPx4CT
+ *
+ * RTSM_VE_Cortex_A9x4.lisa
+ */
+
+/dts-v1/;
+
+/ {
+ model = "RTSM_VE_CortexA9x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a9x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <3>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
+
+ scu@2c000000 {
+ compatible = "arm,cortex-a9-scu";
+ reg = <0x2c000000 0x58>;
+ };
+
+ timer@2c000600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0xf04>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c000100 0x100>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x04000000>,
+ <1 0 0x14000000 0x04000000>,
+ <2 0 0x18000000 0x04000000>,
+ <3 0 0x1c000000 0x04000000>,
+ <4 0 0x0c000000 0x04000000>,
+ <5 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
new file mode 100644
index 00000000000..6d125662612
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
@@ -0,0 +1,224 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * Motherboard component
+ *
+ * VEMotherBoard.lisa
+ */
+
+ motherboard {
+ compatible = "arm,vexpress,v2m-p1", "simple-bus";
+ arm,hbi = <0x190>;
+ arm,vexpress,site = <0>;
+ arm,v2m-memory-map = "rs1";
+ #address-cells = <2>; /* SMB chipselect number and offset */
+ #size-cells = <1>;
+ #interrupt-cells = <1>;
+ ranges;
+
+ flash@0,00000000 {
+ compatible = "arm,vexpress-flash", "cfi-flash";
+ reg = <0 0x00000000 0x04000000>,
+ <4 0x00000000 0x04000000>;
+ bank-width = <4>;
+ };
+
+ vram@2,00000000 {
+ compatible = "arm,vexpress-vram";
+ reg = <2 0x00000000 0x00800000>;
+ };
+
+ ethernet@2,02000000 {
+ compatible = "smsc,lan91c111";
+ reg = <2 0x02000000 0x10000>;
+ interrupts = <15>;
+ };
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+
+ v2m_sysreg: sysreg@010000 {
+ compatible = "arm,vexpress-sysreg";
+ reg = <0x010000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ v2m_sysctl: sysctl@020000 {
+ compatible = "arm,sp810", "arm,primecell";
+ reg = <0x020000 0x1000>;
+ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
+ clock-names = "refclk", "timclk", "apb_pclk";
+ #clock-cells = <1>;
+ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ };
+
+ aaci@040000 {
+ compatible = "arm,pl041", "arm,primecell";
+ reg = <0x040000 0x1000>;
+ interrupts = <11>;
+ clocks = <&smbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ mmci@050000 {
+ compatible = "arm,pl180", "arm,primecell";
+ reg = <0x050000 0x1000>;
+ interrupts = <9 10>;
+ cd-gpios = <&v2m_sysreg 0 0>;
+ wp-gpios = <&v2m_sysreg 1 0>;
+ max-frequency = <12000000>;
+ vmmc-supply = <&v2m_fixed_3v3>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "mclk", "apb_pclk";
+ };
+
+ kmi@060000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x060000 0x1000>;
+ interrupts = <12>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi@070000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x070000 0x1000>;
+ interrupts = <13>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ v2m_serial0: uart@090000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x090000 0x1000>;
+ interrupts = <5>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial1: uart@0a0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0a0000 0x1000>;
+ interrupts = <6>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial2: uart@0b0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0b0000 0x1000>;
+ interrupts = <7>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial3: uart@0c0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0c0000 0x1000>;
+ interrupts = <8>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ wdt@0f0000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0f0000 0x1000>;
+ interrupts = <0>;
+ clocks = <&v2m_refclk32khz>, <&smbclk>;
+ clock-names = "wdogclk", "apb_pclk";
+ };
+
+ v2m_timer01: timer@110000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x110000 0x1000>;
+ interrupts = <2>;
+ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ v2m_timer23: timer@120000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x120000 0x1000>;
+ interrupts = <3>;
+ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x170000 0x1000>;
+ interrupts = <4>;
+ clocks = <&smbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ clcd@1f0000 {
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f0000 0x1000>;
+ interrupts = <14>;
+ clocks = <&v2m_oscclk1>, <&smbclk>;
+ clock-names = "v2m:oscclk1", "apb_pclk";
+ mode = "VGA";
+ use_dma = <0>;
+ framebuffer = <0x18000000 0x00180000>;
+ };
+ };
+
+ v2m_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
+ mcc {
+ compatible = "simple-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ v2m_oscclk1: osc@1 {
+ /* CLCD clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <23750000 63500000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk1";
+ };
+
+ muxfpga@0 {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown@0 {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+ };
+ };
diff --git a/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
new file mode 100644
index 00000000000..55d4f5ce019
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
@@ -0,0 +1,227 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ * ARMCortexA7x4CT
+ * RTSM_VE_Cortex_A15x1_A7x1.lisa
+ */
+
+/dts-v1/;
+
+/memreserve/ 0xff000000 0x01000000;
+
+/ {
+ model = "RTSM_VE_CortexA15x1-A7x1";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x1_a7x1", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cluster0: cluster@0 {
+ reg = <0>;
+// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ };
+ };
+
+ cluster1: cluster@1 {
+ reg = <1>;
+// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core1: core@0 {
+ reg = <0>;
+ };
+
+ };
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+// clock-frequency = <1000000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core1>;
+// clock-frequency = <800000000>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0 0x2c090000 0 0x8000>;
+ };
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0 0x60000000 0 0x1000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
new file mode 100644
index 00000000000..a2d4441568a
--- /dev/null
+++ b/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
@@ -0,0 +1,335 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * ARMCortexA15x4CT
+ * ARMCortexA7x4CT
+ * RTSM_VE_Cortex_A15x4_A7x4.lisa
+ */
+
+/dts-v1/;
+
+/memreserve/ 0xff000000 0x01000000;
+
+/ {
+ model = "RTSM_VE_CortexA15x4-A7x4";
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,rtsm_ve,cortex_a15x4_a7x4", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cluster0: cluster@0 {
+ reg = <0>;
+// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ core1: core@1 {
+ reg = <1>;
+ };
+
+ core2: core@2 {
+ reg = <2>;
+ };
+
+ core3: core@3 {
+ reg = <3>;
+ };
+
+ };
+ };
+
+ cluster1: cluster@1 {
+ reg = <1>;
+// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core4: core@0 {
+ reg = <0>;
+ };
+
+ core5: core@1 {
+ reg = <1>;
+ };
+
+ core6: core@2 {
+ reg = <2>;
+ };
+
+ core7: core@3 {
+ reg = <3>;
+ };
+
+ };
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+// clock-frequency = <1000000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ cluster = <&cluster0>;
+ core = <&core1>;
+// clock-frequency = <1000000000>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ cluster = <&cluster0>;
+ core = <&core2>;
+// clock-frequency = <1000000000>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ cluster = <&cluster0>;
+ core = <&core3>;
+// clock-frequency = <1000000000>;
+ };
+
+ cpu4: cpu@4 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core4>;
+// clock-frequency = <800000000>;
+ };
+
+ cpu5: cpu@5 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ cluster = <&cluster1>;
+ core = <&core5>;
+// clock-frequency = <800000000>;
+ };
+
+ cpu6: cpu@6 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ cluster = <&cluster1>;
+ core = <&core6>;
+// clock-frequency = <800000000>;
+ };
+
+ cpu7: cpu@7 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ cluster = <&cluster1>;
+ core = <&core7>;
+// clock-frequency = <800000000>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0 0x2c090000 0 0x8000>;
+ };
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0 0x60000000 0 0x1000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ gic-cpuif@2 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <2>;
+ cpu = <&cpu2>;
+ };
+ gic-cpuif@3 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <3>;
+ cpu = <&cpu3>;
+ };
+ gic-cpuif@4 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <4>;
+ cpu = <&cpu4>;
+ };
+ gic-cpuif@5 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <5>;
+ cpu = <&cpu5>;
+ };
+ gic-cpuif@6 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <6>;
+ cpu = <&cpu6>;
+ };
+ gic-cpuif@7 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <7>;
+ cpu = <&cpu7>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* ACLK clock to the AXI master port on the test chip */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <30000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "extsaxiclk";
+ };
+
+ oscclk1: osc@1 {
+ /* Reference clock for the CLCD */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <10000000 80000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clcdclk";
+ };
+
+ smbclk: oscclk2: osc@2 {
+ /* Reference clock for the test chip internal PLLs */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <33000000 100000000>;
+ #clock-cells = <0>;
+ clock-output-names = "tcrefclk";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+};
+
+/include/ "clcd-panels.dtsi"
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index ac870fb3fa0..9584232ee6b 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -228,6 +228,7 @@
};
clcd@1f0000 {
+ status = "disabled";
compatible = "arm,pl111", "arm,primecell";
reg = <0x1f0000 0x1000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index f1420368355..6593398c11a 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -227,6 +227,7 @@
};
clcd@1f000 {
+ status = "disabled";
compatible = "arm,pl111", "arm,primecell";
reg = <0x1f000 0x1000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 73187173117..cc6a8c0cfe3 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/memreserve/ 0xbf000000 0x01000000;
+
/ {
model = "V2P-CA15";
arm,hbi = <0x237>;
@@ -57,6 +59,8 @@
interrupts = <0 85 4>;
clocks = <&oscclk5>;
clock-names = "pxlclk";
+ mode = "1024x768-16@60";
+ framebuffer = <0 0xff000000 0 0x01000000>;
};
memory-controller@2b0a0000 {
@@ -117,7 +121,7 @@
};
pmu {
- compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
+ compatible = "arm,cortex-a15-pmu";
interrupts = <0 68 4>,
<0 69 4>;
};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index dfe371ec274..b37fdd8c146 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -9,11 +9,13 @@
/dts-v1/;
+/memreserve/ 0xff000000 0x01000000;
+
/ {
model = "V2P-CA15_CA7";
arm,hbi = <0x249>;
arm,vexpress,site = <0xf>;
- compatible = "arm,vexpress,v2p-ca15_a7", "arm,vexpress";
+ compatible = "arm,vexpress,v2p-ca15_a7", "arm,vexpress", "arm,generic";
interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
@@ -29,6 +31,48 @@
i2c1 = &v2m_i2c_pcie;
};
+ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cluster0: cluster@0 {
+ reg = <0>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core0: core@0 {
+ reg = <0>;
+ };
+
+ core1: core@1 {
+ reg = <1>;
+ };
+
+ };
+ };
+
+ cluster1: cluster@1 {
+ reg = <1>;
+ cores {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ core2: core@0 {
+ reg = <0>;
+ };
+
+ core3: core@1 {
+ reg = <1>;
+ };
+
+ core4: core@2 {
+ reg = <2>;
+ };
+ };
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -37,36 +81,51 @@
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0>;
+ cluster = <&cluster0>;
+ core = <&core0>;
+ clock-frequency = <1000000000>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <1>;
+ cluster = <&cluster0>;
+ core = <&core1>;
+ clock-frequency = <1000000000>;
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x100>;
+ cluster = <&cluster1>;
+ core = <&core2>;
+ clock-frequency = <800000000>;
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x101>;
+ cluster = <&cluster1>;
+ core = <&core3>;
+ clock-frequency = <800000000>;
};
cpu4: cpu@4 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x102>;
+ cluster = <&cluster1>;
+ core = <&core4>;
+ clock-frequency = <800000000>;
};
};
memory@80000000 {
device_type = "memory";
- reg = <0 0x80000000 0 0x40000000>;
+ reg = <0 0x80000000 0 0x80000000>;
};
wdt@2a490000 {
@@ -81,6 +140,8 @@
compatible = "arm,hdlcd";
reg = <0 0x2b000000 0 0x1000>;
interrupts = <0 85 4>;
+ mode = "1024x768-16@60";
+ framebuffer = <0 0xff000000 0 0x01000000>;
clocks = <&oscclk5>;
clock-names = "pxlclk";
};
@@ -102,6 +163,44 @@
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
+
+ gic-cpuif@0 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <0>;
+ cpu = <&cpu0>;
+ };
+ gic-cpuif@1 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <1>;
+ cpu = <&cpu1>;
+ };
+ gic-cpuif@2 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <2>;
+ cpu = <&cpu2>;
+ };
+
+ gic-cpuif@3 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <3>;
+ cpu = <&cpu3>;
+ };
+
+ gic-cpuif@4 {
+ compatible = "arm,gic-cpuif";
+ cpuif-id = <4>;
+ cpu = <&cpu4>;
+ };
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0 0x2c090000 0 0x10000>;
+ interrupts = <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>,
+ <0 105 4>;
};
memory-controller@7ffd0000 {
@@ -125,6 +224,12 @@
clock-names = "apb_pclk";
};
+ spc@7fff0000 {
+ compatible = "arm,spc";
+ reg = <0 0x7fff0000 0 0x1000>;
+ interrupts = <0 95 4>;
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <1 13 0xf08>,
@@ -133,12 +238,21 @@
<1 10 0xf08>;
};
- pmu {
- compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
+ pmu_a15 {
+ compatible = "arm,cortex-a15-pmu";
+ cluster = <&cluster0>;
interrupts = <0 68 4>,
<0 69 4>;
};
+ pmu_a7 {
+ compatible = "arm,cortex-a7-pmu";
+ cluster = <&cluster1>;
+ interrupts = <0 128 4>,
+ <0 129 4>,
+ <0 130 4>;
+ };
+
oscclk6a: oscclk6a {
/* Reference 24MHz clock */
compatible = "fixed-clock";
@@ -147,6 +261,15 @@
clock-output-names = "oscclk6a";
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x80100001>;
+ cpu_off = <0x80100002>;
+ cpu_on = <0x80100003>;
+ migrate = <0x80100004>;
+ };
+
dcc {
compatible = "arm,vexpress,config-bus";
arm,vexpress,config-bridge = <&v2m_sysreg>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6328cbc71d3..cf633ed6a1b 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/memreserve/ 0xbf000000 0x01000000;
+
/ {
model = "V2P-CA5s";
arm,hbi = <0x225>;
@@ -59,6 +61,8 @@
interrupts = <0 85 4>;
clocks = <&oscclk3>;
clock-names = "pxlclk";
+ mode = "640x480-16@60";
+ framebuffer = <0xbf000000 0x01000000>;
};
memory-controller@2a150000 {
@@ -111,7 +115,7 @@
};
pmu {
- compatible = "arm,cortex-a5-pmu", "arm,cortex-a9-pmu";
+ compatible = "arm,cortex-a5-pmu";
interrupts = <0 68 4>,
<0 69 4>;
};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index 1420bb14d95..663fa5927e7 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/include/ "clcd-panels.dtsi"
+
/ {
model = "V2P-CA9";
arm,hbi = <0x191>;
@@ -73,6 +75,8 @@
interrupts = <0 44 4>;
clocks = <&oscclk1>, <&oscclk2>;
clock-names = "clcdclk", "apb_pclk";
+ mode = "XVGA";
+ use_dma = <1>;
};
memory-controller@100e0000 {
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 8d024cf08f0..c187710c1fe 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,5 +11,11 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+CFLAGS_REMOVE_mcpm_entry.o = -pg
+AFLAGS_mcpm_head.o := -march=armv7-a
+AFLAGS_vlock.o := -march=armv7-a
+obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644
index 00000000000..c3b9427f0a0
--- /dev/null
+++ b/arch/arm/common/bL_switcher.c
@@ -0,0 +1,783 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here.
+ */
+
+static int read_mpidr(void)
+{
+ unsigned int id;
+ asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (id));
+ return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+ struct timespec ts;
+ getnstimeofday(&ts);
+ return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+ unsigned ib_mpidr, ib_cpu, ib_cluster;
+ long volatile handshake, **handshake_ptr = _arg;
+
+ pr_debug("%s\n", __func__);
+
+ ib_mpidr = cpu_logical_map(smp_processor_id());
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ /* Advertise our handshake location */
+ if (handshake_ptr) {
+ handshake = 0;
+ *handshake_ptr = &handshake;
+ } else
+ handshake = -1;
+
+ /*
+ * Our state has been saved at this point. Let's release our
+ * inbound CPU.
+ */
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+ sev();
+
+ /*
+ * From this point, we must assume that our counterpart CPU might
+ * have taken over in its parallel world already, as if execution
+ * just returned from cpu_suspend(). It is therefore important to
+ * be very careful not to make any change that the other CPU is
+ * not expecting. This is why we need stack isolation.
+ *
+ * Fancy undercover tasks could be performed here. For now
+ * we have none.
+ */
+
+ /*
+ * Let's wait until our inbound is alive.
+ */
+ while (!handshake) {
+ wfe();
+ smp_mb();
+ }
+
+ /* Let's put ourself down. */
+ mcpm_cpu_power_down();
+
+ /* should never get here */
+ BUG();
+}
+
+/*
+ * Stack isolation. To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+ unsigned int mpidr = read_mpidr();
+ unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ void *stack = current_thread_info() + 1;
+ stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+ stack += clusterid * STACK_SIZE + STACK_SIZE;
+ call_with_stack(bL_do_switch, (void *)_arg, stack);
+ BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+ unsigned int mpidr, this_cpu, that_cpu;
+ unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+ struct completion inbound_alive;
+ struct tick_device *tdev;
+ enum clock_event_mode tdev_mode;
+ long volatile *handshake_ptr;
+ int ipi_nr, ret;
+
+ this_cpu = smp_processor_id();
+ ob_mpidr = read_mpidr();
+ ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+ ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+ BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+ if (new_cluster_id == ob_cluster)
+ return 0;
+
+ that_cpu = bL_switcher_cpu_pairing[this_cpu];
+ ib_mpidr = cpu_logical_map(that_cpu);
+ ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+ ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+ pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+ this_cpu, ob_mpidr, ib_mpidr);
+
+ /* Close the gate for our entry vectors */
+ mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+ mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+ /* Install our "inbound alive" notifier. */
+ init_completion(&inbound_alive);
+ ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+ ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+ /*
+ * Let's wake up the inbound CPU now in case it requires some delay
+ * to come online, but leave it gated in our entry vector code.
+ */
+ ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+ if (ret) {
+ pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Raise a SGI on the inbound CPU to make sure it doesn't stall
+ * in a possible WFI, such as in bL_power_down().
+ */
+ gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+ /*
+ * Wait for the inbound to come up. This allows for other
+ * tasks to be scheduled in the meantime.
+ */
+ wait_for_completion(&inbound_alive);
+ mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+ /*
+ * From this point we are entering the switch critical zone
+ * and can't sleep/schedule anymore.
+ */
+ local_irq_disable();
+ local_fiq_disable();
+ trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+ /* redirect GIC's SGIs to our counterpart */
+ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+ tdev = tick_get_device(this_cpu);
+ if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+ tdev = NULL;
+ if (tdev) {
+ tdev_mode = tdev->evtdev->mode;
+ clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ }
+
+ ret = cpu_pm_enter();
+
+ /* we cannot tolerate errors at this point */
+ if (ret)
+ panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+ /*
+ * Swap the physical CPUs in the logical map for this logical CPU.
+ * This must be flushed to RAM as the resume code
+ * needs to access it while the caches are still disabled.
+ */
+ cpu_logical_map(this_cpu) = ib_mpidr;
+ cpu_logical_map(that_cpu) = ob_mpidr;
+ sync_cache_w(&cpu_logical_map(this_cpu));
+
+ /* Let's do the actual CPU switch. */
+ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+ if (ret > 0)
+ panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+ /* We are executing on the inbound CPU at this point */
+ mpidr = read_mpidr();
+ pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+ BUG_ON(mpidr != ib_mpidr);
+
+ mcpm_cpu_powered_up();
+
+ ret = cpu_pm_exit();
+
+ if (tdev) {
+ clockevents_set_mode(tdev->evtdev, tdev_mode);
+ clockevents_program_event(tdev->evtdev,
+ tdev->evtdev->next_event, 1);
+ }
+
+ trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+ local_fiq_enable();
+ local_irq_enable();
+
+ *handshake_ptr = 1;
+ dsb_sev();
+
+ if (ret)
+ pr_err("%s exiting with error %d\n", __func__, ret);
+ return ret;
+}
+
+struct bL_thread {
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ int wanted_cluster;
+ struct completion started;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+ struct bL_thread *t = arg;
+ struct sched_param param = { .sched_priority = 1 };
+ int cluster;
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ complete(&t->started);
+
+ do {
+ if (signal_pending(current))
+ flush_signals(current);
+ wait_event_interruptible(t->wq,
+ t->wanted_cluster != -1 ||
+ kthread_should_stop());
+ cluster = xchg(&t->wanted_cluster, -1);
+ if (cluster != -1)
+ bL_switch_to(cluster);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+ struct task_struct *task;
+
+ task = kthread_create_on_node(bL_switcher_thread, arg,
+ cpu_to_node(cpu), "kswitcher_%d", cpu);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else
+ pr_err("%s failed for CPU %d\n", __func__, cpu);
+ return task;
+}
+
+/*
+ * bL_switch_request - Switch to a specific cluster for the given CPU
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
+ */
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+ struct bL_thread *t;
+
+ if (cpu >= ARRAY_SIZE(bL_threads)) {
+ pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ t = &bL_threads[cpu];
+ if (IS_ERR(t->task))
+ return PTR_ERR(t->task);
+ if (!t->task)
+ return -ESRCH;
+
+ t->wanted_cluster = new_cluster_id;
+ wake_up(&t->wq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request);
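+
+/*
+ * A minimal illustration of the request API (hypothetical caller, kept
+ * under #if 0 so it is a sketch rather than part of the driver): ask
+ * for logical CPU 0 to be backed by cluster 1.
+ */
+#if 0
+static int example_switch_cpu0(void)
+{
+ return bL_switch_request(0, 1);
+}
+#endif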
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+ int ret;
+
+ ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ pr_err("%s: notifier chain failed with status 0x%x\n",
+ __func__, ret);
+ return notifier_to_errno(ret);
+}
+
+static void bL_switcher_restore_cpus(void)
+{
+ int i;
+
+ for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+ cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+ int i, j, cluster_0, gic_id, ret;
+ unsigned int cpu, cluster, mask;
+ cpumask_t available_cpus;
+
+ /* First pass to validate what we have */
+ mask = 0;
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster >= 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+ if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+ return -EINVAL;
+ mask |= (1 << cluster);
+ }
+ if (mask != 3) {
+ pr_err("%s: no CPU pairing possible\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's do the pairing. We match each CPU with another CPU
+ * from a different cluster. To get a uniform scheduling behavior
+ * without fiddling with CPU topology and compute capacity data,
+ * we'll use logical CPUs initially belonging to the same cluster.
+ */
+ memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+ cpumask_copy(&available_cpus, cpu_online_mask);
+ cluster_0 = -1;
+ for_each_cpu(i, &available_cpus) {
+ int match = -1;
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+ if (cluster_0 == -1)
+ cluster_0 = cluster;
+ if (cluster != cluster_0)
+ continue;
+ cpumask_clear_cpu(i, &available_cpus);
+ for_each_cpu(j, &available_cpus) {
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+ /*
+ * Let's remember the last match to create "odd"
+ * pairing on purpose in order for other code not
+ * to assume any relation between physical and
+ * logical CPU numbers.
+ */
+ if (cluster != cluster_0)
+ match = j;
+ }
+ if (match != -1) {
+ bL_switcher_cpu_pairing[i] = match;
+ cpumask_clear_cpu(match, &available_cpus);
+ pr_info("CPU%d paired with CPU%d\n", i, match);
+ }
+ }
+
+ /*
+ * Now we disable the unwanted CPUs i.e. everything that has no
+ * pairing information (that includes the pairing counterparts).
+ */
+ cpumask_clear(&bL_switcher_removed_logical_cpus);
+ for_each_online_cpu(i) {
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+ /* Let's take note of the GIC ID for this CPU */
+ gic_id = gic_get_cpu_id(i);
+ if (gic_id < 0) {
+ pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+ bL_switcher_restore_cpus();
+ return -EINVAL;
+ }
+ bL_gic_id[cpu][cluster] = gic_id;
+ pr_info("GIC ID for CPU %u cluster %u is %u\n",
+ cpu, cluster, gic_id);
+
+ if (bL_switcher_cpu_pairing[i] != -1) {
+ bL_switcher_cpu_original_cluster[i] = cluster;
+ continue;
+ }
+
+ ret = cpu_down(i);
+ if (ret) {
+ bL_switcher_restore_cpus();
+ return ret;
+ }
+ cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+ }
+
+ return 0;
+}
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+ int cpu;
+
+ if (!bL_switcher_active)
+ return -EUNATCH;
+
+ mpidr &= MPIDR_HWID_BITMASK;
+ for_each_online_cpu(cpu) {
+ int pairing = bL_switcher_cpu_pairing[cpu];
+ if (pairing == -1)
+ continue;
+ if ((mpidr == cpu_logical_map(cpu)) ||
+ (mpidr == cpu_logical_map(pairing)))
+ return cpu;
+ }
+ return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+ trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+ int ret;
+
+ preempt_disable();
+
+ bL_switcher_trace_trigger_cpu(NULL);
+ ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+ int cpu, ret;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+ if (bL_switcher_active) {
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return 0;
+ }
+
+ pr_info("big.LITTLE switcher initializing\n");
+
+ ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+ if (ret)
+ goto error;
+
+ ret = bL_switcher_halve_cpus();
+ if (ret)
+ goto error;
+
+ bL_switcher_trace_trigger();
+
+ for_each_online_cpu(cpu) {
+ struct bL_thread *t = &bL_threads[cpu];
+ init_waitqueue_head(&t->wq);
+ init_completion(&t->started);
+ t->wanted_cluster = -1;
+ t->task = bL_switcher_thread_create(cpu, t);
+ }
+
+ bL_switcher_active = 1;
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ pr_info("big.LITTLE switcher initialized\n");
+ goto out;
+
+error:
+ pr_warning("big.LITTLE switcher initialization failed\n");
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+ return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+ unsigned int cpu, cluster;
+ struct bL_thread *t;
+ struct task_struct *task;
+
+ mutex_lock(&bL_switcher_activation_lock);
+ cpu_hotplug_driver_lock();
+
+ if (!bL_switcher_active)
+ goto out;
+
+ if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+ bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+ goto out;
+ }
+
+ bL_switcher_active = 0;
+
+ /*
+ * To deactivate the switcher, we must shut down the switcher
+ * threads to prevent any other requests from being accepted.
+ * Then, if the final cluster for a given logical CPU is not the
+ * same as the original one, we'll recreate a switcher thread
+ * just for the purpose of switching the CPU back without any
+ * possibility of interference from external requests.
+ */
+ for_each_online_cpu(cpu) {
+ t = &bL_threads[cpu];
+ task = t->task;
+ t->task = NULL;
+ if (!task || IS_ERR(task))
+ continue;
+ kthread_stop(task);
+ /* no more switch may happen on this CPU at this point */
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ init_completion(&t->started);
+ t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+ task = bL_switcher_thread_create(cpu, t);
+ if (!IS_ERR(task)) {
+ wait_for_completion(&t->started);
+ kthread_stop(task);
+ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ if (cluster == bL_switcher_cpu_original_cluster[cpu])
+ continue;
+ }
+ /* If execution gets here, we're in trouble. */
+ pr_crit("%s: unable to restore original cluster for CPU %d\n",
+ __func__, cpu);
+ pr_crit("%s: CPU %d can't be restored\n",
+ __func__, bL_switcher_cpu_pairing[cpu]);
+ cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+ &bL_switcher_removed_logical_cpus);
+ }
+
+ bL_switcher_restore_cpus();
+ bL_switcher_trace_trigger();
+
+ bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+ cpu_hotplug_driver_unlock();
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+
+ switch (buf[0]) {
+ case '0':
+ bL_switcher_disable();
+ ret = 0;
+ break;
+ case '1':
+ ret = bL_switcher_enable();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = bL_switcher_trace_trigger();
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+ __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+ __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+ &bL_switcher_active_attr.attr,
+ &bL_switcher_trace_trigger_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+ .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+ int ret;
+
+ bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+ if (!bL_switcher_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+ if (ret)
+ kobject_put(bL_switcher_kobj);
+ return ret;
+}
+
+#endif /* CONFIG_SYSFS */
+
+bool bL_switcher_get_enabled(void)
+{
+ mutex_lock(&bL_switcher_activation_lock);
+
+ return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+ mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
+
+/*
+ * Veto any CPU hotplug operation while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ if (bL_switcher_active)
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bL_switcher_hotplug_notifier =
+ { &bL_switcher_hotplug_callback, NULL, 0 };
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+ int ret;
+
+ if (MAX_NR_CLUSTERS != 2) {
+ pr_err("%s: only dual cluster systems are supported\n", __func__);
+ return -EINVAL;
+ }
+
+ register_cpu_notifier(&bL_switcher_hotplug_notifier);
+
+ if (!no_bL_switcher) {
+ ret = bL_switcher_enable();
+ if (ret)
+ return ret;
+ }
+
+#ifdef CONFIG_SYSFS
+ ret = bL_switcher_sysfs_init();
+ if (ret)
+ pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+ return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644
index 00000000000..5e2dd197e72
--- /dev/null
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * Dummy interface to user space for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ unsigned char val[3];
+ unsigned int cpu, cluster;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ if (len < 3)
+ return -EINVAL;
+
+ if (copy_from_user(val, buf, 3))
+ return -EFAULT;
+
+ /* format: <cpu#>,<cluster#> */
+ if (val[0] < '0' || val[0] > '4' ||
+ val[1] != ',' ||
+ val[2] < '0' || val[2] > '1')
+ return -EINVAL;
+
+ cpu = val[0] - '0';
+ cluster = val[2] - '0';
+ ret = bL_switch_request(cpu, cluster);
+
+ return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+ .write = bL_switcher_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+ MISC_DYNAMIC_MINOR,
+ "b.L_switcher",
+ &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+ return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+ misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
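+
+/*
+ * Illustrative user-space sketch (hypothetical program, not part of
+ * this driver): request that CPU 0 run on cluster 1 through the dummy
+ * interface, using the "<cpu#>,<cluster#>" format parsed above.
+ *
+ * int fd = open("/dev/b.L_switcher", O_WRONLY);
+ * if (fd >= 0) {
+ * write(fd, "0,1", 3);
+ * close(fd);
+ * }
+ */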
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 00000000000..4a2b32fd53a
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,275 @@
+/*
+ * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
+#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+#include <asm/cputype.h>
+
+extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
+{
+ unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+ mcpm_entry_vectors[cluster][cpu] = val;
+ sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
+}
+
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val)
+{
+ unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+ poke[0] = poke_phys_addr;
+ poke[1] = poke_val;
+ __cpuc_flush_dcache_area((void *)poke, 8);
+ outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+ if (platform_ops)
+ return -EBUSY;
+ platform_ops = ops;
+ return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+ if (!platform_ops)
+ return -EUNATCH; /* try not to shadow power_up errors */
+ might_sleep();
+ return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /*
+ * Do this before calling into the power_down method,
+ * as it might not always be safe to do afterwards.
+ */
+ setup_mm_for_reboot();
+
+ platform_ops->power_down();
+
+ /*
+ * It is possible for a power_up request to happen concurrently
+ * with a power_down request for the same CPU. In this case the
+ * power_down method might not be able to actually enter a
+ * powered down state with the WFI instruction if the power_up
+ * method has removed the required reset condition. The
+ * power_down method is then allowed to return. We must perform
+ * a re-entry in the kernel as if the power_up method had just
+ * deasserted reset on the CPU.
+ *
+ * To simplify race issues, the platform specific implementation
+ * must accommodate the possibility of unordered calls to
+ * power_down and power_up with a usage count. Therefore, if a
+ * call to power_up is issued for a CPU that is not down, then
+ * the next call to power_down must not attempt a full shutdown
+ * but only do the minimum (normally disabling L1 cache and CPU
+ * coherency) and return just as if a concurrent power_up request
+ * had happened as described above.
+ */
+
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+
+ /* should never get here */
+ BUG();
+}
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /* Very similar to mcpm_cpu_power_down() */
+ setup_mm_for_reboot();
+ platform_ops->suspend(expected_residency);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+ if (!platform_ops)
+ return -EUNATCH;
+ if (platform_ops->powered_up)
+ platform_ops->powered_up();
+ return 0;
+}
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTLR.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTLR.C bit) is expected to be off.
+ * However, the L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ * CLUSTER_UP: no destructive teardown was done and the cluster has been
+ * restored to the previous state (CPU cache still active); or
+ * CLUSTER_DOWN: the cluster has been torn down, ready for power-off
+ * (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cluster = state;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete. CPU cache expected to be active.
+ *
+ * Returns:
+ * false: the critical section was not entered because an inbound CPU was
+ * observed, or the cluster is already being set up;
+ * true: the critical section was entered: it is now safe to tear down the
+ * cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int i;
+ struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+ /* Warn inbound CPUs that the cluster is being torn down: */
+ c->cluster = CLUSTER_GOING_DOWN;
+ sync_cache_w(&c->cluster);
+
+ /* Back out if the inbound cluster is already in the critical region: */
+ sync_cache_r(&c->inbound);
+ if (c->inbound == INBOUND_COMING_UP)
+ goto abort;
+
+ /*
+ * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+ * teardown is complete on each CPU before tearing down the cluster.
+ *
+ * If any CPU has been woken up again from the DOWN state, then we
+ * shouldn't be taking the cluster down at all: abort in that case.
+ */
+ sync_cache_r(&c->cpus);
+ for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+ int cpustate;
+
+ if (i == cpu)
+ continue;
+
+ while (1) {
+ cpustate = c->cpus[i].cpu;
+ if (cpustate != CPU_GOING_DOWN)
+ break;
+
+ wfe();
+ sync_cache_r(&c->cpus[i].cpu);
+ }
+
+ switch (cpustate) {
+ case CPU_DOWN:
+ continue;
+
+ default:
+ goto abort;
+ }
+ }
+
+ return true;
+
+abort:
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+ return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+ sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+ return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level))
+{
+ unsigned int i, j, mpidr, this_cluster;
+
+ BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+ BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+ /*
+ * Set initial CPU and cluster states.
+ * Only one cluster is assumed to be active at this point.
+ */
+ for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+ mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+ mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+ for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+ mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+ }
+ mpidr = read_cpuid_mpidr();
+ this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ for_each_online_cpu(i)
+ mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+ mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+ sync_cache_w(&mcpm_sync);
+
+ if (power_up_setup) {
+ mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+ sync_cache_w(&mcpm_power_up_setup_phys);
+ }
+
+ return 0;
+}
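+
+/*
+ * Illustrative backend sketch (hypothetical names, kept under #if 0;
+ * real backends live in platform code): the methods below correspond
+ * to the platform_ops calls made in this file.
+ */
+#if 0
+static int my_power_up(unsigned int cpu, unsigned int cluster)
+{
+ /* assert the power/reset controls that let this CPU boot */
+ return 0;
+}
+
+static void my_power_down(void)
+{
+ /* disable coherency and caches, then enter WFI */
+}
+
+static const struct mcpm_platform_ops my_mcpm_ops = {
+ .power_up = my_power_up,
+ .power_down = my_power_down,
+};
+
+static int __init my_mcpm_init(void)
+{
+ return mcpm_platform_register(&my_mcpm_ops);
+}
+#endif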
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 00000000000..057e9c5a9e1
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,231 @@
+/*
+ * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
+ */
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+#include "vlock.h"
+
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
+ .macro pr_dbg string
+#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
+ b 1901f
+1902: .asciz "CPU"
+1903: .asciz " cluster"
+1904: .asciz ": \string"
+ .align
+1901: adr r0, 1902b
+ bl printascii
+ mov r0, r9
+ bl printhex8
+ adr r0, 1903b
+ bl printascii
+ mov r0, r10
+ bl printhex8
+ adr r0, 1904b
+ bl printascii
+#endif
+ .endm
+
+ .arm
+ .align
+
+ENTRY(mcpm_entry_point)
+
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
+ THUMB( .thumb )
+1:
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r9, r0, #0, #8 @ r9 = cpu
+ ubfx r10, r0, #8, #8 @ r10 = cluster
+ mov r3, #MAX_CPUS_PER_CLUSTER
+ mla r4, r3, r10, r9 @ r4 = canonical CPU index
+ cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
+ blo 2f
+
+ /* We didn't expect this CPU. Try to cheaply make it quiet. */
+1: wfi
+ wfe
+ b 1b
+
+2: pr_dbg "kernel mcpm_entry_point\n"
+
+ /*
+ * MMU is off so we need to get to various variables in a
+ * position independent way.
+ */
+ adr r5, 3f
+ ldmia r5, {r0, r6, r7, r8, r11}
+ add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
+ add r6, r5, r6 @ r6 = mcpm_entry_vectors
+ ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
+ add r8, r5, r8 @ r8 = mcpm_sync
+ add r11, r5, r11 @ r11 = first_man_locks
+
+ @ Perform an early poke, if any
+ add r0, r0, r4, lsl #3
+ ldmia r0, {r0, r1}
+ teq r0, #0
+ strne r1, [r0]
+
+ mov r0, #MCPM_SYNC_CLUSTER_SIZE
+ mla r8, r0, r10, r8 @ r8 = sync cluster base
+
+ @ Signal that this CPU is coming UP:
+ mov r0, #CPU_COMING_UP
+ mov r5, #MCPM_SYNC_CPU_SIZE
+ mla r5, r9, r5, r8 @ r5 = sync cpu address
+ strb r0, [r5]
+
+ @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+ @ state, because there is at least one active CPU (this CPU).
+
+ mov r0, #VLOCK_SIZE
+ mla r11, r0, r10, r11 @ r11 = cluster first man lock
+ mov r0, r11
+ mov r1, r9 @ cpu
+ bl vlock_trylock @ implies DMB
+
+ cmp r0, #0 @ failed to get the lock?
+ bne mcpm_setup_wait @ wait for cluster setup if so
+
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP @ cluster already up?
+ bne mcpm_setup @ if not, set up the cluster
+
+ @ Otherwise, release the first man lock and skip setup:
+ mov r0, r11
+ bl vlock_unlock
+ b mcpm_setup_complete
+
+mcpm_setup:
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ @ Signal that the cluster is being brought up:
+ mov r0, #INBOUND_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dmb
+
+ @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+ @ point onwards will observe INBOUND_COMING_UP and abort.
+
+ @ Wait for any previously-pending cluster teardown operations to abort
+ @ or complete:
+mcpm_teardown_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_GOING_DOWN
+ bne first_man_setup
+ wfe
+ b mcpm_teardown_wait
+
+first_man_setup:
+ dmb
+
+ @ If the outbound gave up before teardown started, skip cluster setup:
+
+ cmp r0, #CLUSTER_UP
+ beq mcpm_setup_leave
+
+ @ power_up_setup is now responsible for setting up the cluster:
+
+ cmp r7, #0
+ mov r0, #1 @ second (cluster) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ mov r0, #CLUSTER_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ dmb
+
+mcpm_setup_leave:
+ @ Leave the cluster setup critical section:
+
+ mov r0, #INBOUND_NOT_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dsb
+ sev
+
+ mov r0, r11
+ bl vlock_unlock @ implies DMB
+ b mcpm_setup_complete
+
+ @ In the contended case, non-first men wait here for cluster setup
+ @ to complete:
+mcpm_setup_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP
+ wfene
+ bne mcpm_setup_wait
+ dmb
+
+mcpm_setup_complete:
+ @ If a platform-specific CPU setup hook is needed, it is
+ @ called from here.
+
+ cmp r7, #0
+ mov r0, #0 @ first (CPU) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ @ Mark the CPU as up:
+
+ mov r0, #CPU_UP
+ strb r0, [r5]
+
+ @ Observability order of CPU_UP and opening of the gate does not matter.
+
+mcpm_entry_gated:
+ ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
+ cmp r5, #0
+ wfeeq
+ beq mcpm_entry_gated
+ dmb
+
+ pr_dbg "released\n"
+ bx r5
+
+ .align 2
+
+3: .word mcpm_entry_early_pokes - .
+ .word mcpm_entry_vectors - 3b
+ .word mcpm_power_up_setup_phys - 3b
+ .word mcpm_sync - 3b
+ .word first_man_locks - 3b
+
+ENDPROC(mcpm_entry_point)
+
+ .bss
+
+ .align CACHE_WRITEBACK_ORDER
+ .type first_man_locks, #object
+first_man_locks:
+ .space VLOCK_SIZE * MAX_NR_CLUSTERS
+ .align CACHE_WRITEBACK_ORDER
+
+ .type mcpm_entry_vectors, #object
+ENTRY(mcpm_entry_vectors)
+ .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+ .type mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+ .space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+ .type mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+ .space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 00000000000..3caed0db698
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/arm/common/mcpm_platsmp.c
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Code to handle secondary CPU bringup and hotplug for the cluster power API.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+
+static void __init simple_smp_init_cpus(void)
+{
+}
+
+static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned int mpidr, pcpu, pcluster;
+ int ret;
+ extern void secondary_startup(void);
+
+ mpidr = cpu_logical_map(cpu);
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
+ __func__, cpu, pcpu, pcluster);
+
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ ret = mcpm_cpu_power_up(pcpu, pcluster);
+ if (ret)
+ return ret;
+ mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ dsb_sev();
+ return 0;
+}
+
+static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+{
+ mcpm_cpu_powered_up();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int mcpm_cpu_disable(unsigned int cpu)
+{
+ /*
+ * We assume all CPUs may be shut down.
+ * This would be the hook to use for eventual Secure
+ * OS migration requests as described in the PSCI spec.
+ */
+ return 0;
+}
+
+static void mcpm_cpu_die(unsigned int cpu)
+{
+ unsigned int mpidr, pcpu, pcluster;
+ mpidr = read_cpuid_mpidr();
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ mcpm_cpu_power_down();
+}
+
+#endif
+
+static struct smp_operations __initdata mcpm_smp_ops = {
+ .smp_init_cpus = simple_smp_init_cpus,
+ .smp_boot_secondary = mcpm_boot_secondary,
+ .smp_secondary_init = mcpm_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = mcpm_cpu_disable,
+ .cpu_die = mcpm_cpu_die,
+#endif
+};
+
+void __init mcpm_smp_set_ops(void)
+{
+ smp_set_ops(&mcpm_smp_ops);
+}
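+
+/*
+ * One plausible bring-up ordering (hypothetical platform init; only
+ * the mcpm_* calls are real): register the backend ops, initialise
+ * the sync structures, then install these SMP operations.
+ */
+#if 0
+static int __init my_platform_smp_init(void)
+{
+ int ret;
+
+ ret = mcpm_platform_register(&my_mcpm_ops);
+ if (!ret)
+ ret = mcpm_sync_init(my_power_up_setup);
+ if (!ret)
+ mcpm_smp_set_ops();
+ return ret;
+}
+#endif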
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 00000000000..ff198583f68
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code if voting flags can fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
+
+@ voting lock for first-man coordination
+
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+ mov \rscratch, #1
+ strb \rscratch, [\rbase, \rcpu]
+ dmb
+.endm
+
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+ dmb
+ mov \rscratch, #0
+ strb \rscratch, [\rbase, \rcpu]
+ dsb
+ sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+ add r1, r1, #VLOCK_VOTING_OFFSET
+
+ voting_begin r0, r1, r2
+
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
+ cmp r2, #VLOCK_OWNER_NONE
+ bne trylock_fail @ fail if so
+
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
+
+ voting_end r0, r1, r2 @ implies DMB
+
+ @ Wait for the current round of voting to finish:
+
+ MANY( mov r3, #VLOCK_VOTING_OFFSET )
+0:
+ MANY( ldr r2, [r0, r3] )
+ FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
+ cmp r2, #0
+ wfene
+ bne 0b
+ MANY( add r3, r3, #4 )
+ MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
+ MANY( bne 0b )
+
+ @ Check who won:
+
+ dmb
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
+ eor r0, r1, r2 @ zero if I won, else nonzero
+ bx lr
+
+trylock_fail:
+ voting_end r0, r1, r2
+ mov r0, #1 @ nonzero indicates that I lost
+ bx lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+ dmb
+ mov r1, #VLOCK_OWNER_NONE
+ strb r1, [r0, #VLOCK_OWNER_OFFSET]
+ dsb
+ sev
+ bx lr
+ENDPROC(vlock_unlock)
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 00000000000..3b441475a59
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET 0
+#define VLOCK_VOTING_OFFSET 4
+#define VLOCK_VOTING_SIZE ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE 0
+
+#endif /* ! __VLOCK_H */
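+
+/*
+ * Worked example: with MAX_CPUS_PER_CLUSTER == 4, the voting area is
+ * rounded up to one word, (4 + 3) / 4 * 4 == 4 bytes, so VLOCK_SIZE is
+ * 4 + 4 == 8 bytes per cluster lock. With 8 CPUs per cluster the
+ * voting area becomes 8 bytes and VLOCK_SIZE 12, which also selects
+ * the MANY() multi-word polling path in vlock.S.
+ */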
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index 606d48f3b8f..8aab786863d 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -173,7 +173,6 @@ CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_SDIO_UART=m
CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_ATMELMCI_DMA=y
CONFIG_LEDS_ATMEL_PWM=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGER_TIMER=y
diff --git a/arch/arm/configs/vexpress_bL_defconfig b/arch/arm/configs/vexpress_bL_defconfig
new file mode 100644
index 00000000000..0d18cbd3f7c
--- /dev/null
+++ b/arch/arm/configs/vexpress_bL_defconfig
@@ -0,0 +1,157 @@
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
+CONFIG_ARCH_VEXPRESS_TC2=y
+CONFIG_ARCH_VEXPRESS_DCSCB=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_PL310_ERRATA_753970=y
+CONFIG_SMP=y
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_BIG_LITTLE=y
+CONFIG_BL_SWITCHER=y
+CONFIG_BL_SWITCHER_DUMMY_IF=y
+CONFIG_NR_CPUS=8
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+# CONFIG_COMPACTION is not set
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="console=ttyAMA0,38400"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ARM_VEXPRESS_BL_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SENSORS_VEXPRESS=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_VEXPRESS=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_ARMHDLCD=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_ARMAACI=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_EXT4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 92c6eed7aac..99207c45ec1 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -195,6 +195,7 @@ ENTRY(sha1_block_data_order)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
+ sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
@@ -290,7 +291,6 @@ ENTRY(sha1_block_data_order)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
- sub sp,sp,#25*4
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 00000000000..ebf8d9872a6
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,69 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
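
For context, a minimal sketch (not part of this patch) of how a client might consume this interface; the my_* names are invented for illustration:

    /* Hypothetical client of the switcher API. */
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/bL_switcher.h>

    static int my_bl_notify(struct notifier_block *nb,
                            unsigned long action, void *data)
    {
            /* Must not call bL_switcher_{get,put}_enabled() from here. */
            if (action == BL_NOTIFY_PRE_DISABLE)
                    pr_info("switcher about to be disabled\n");
            return NOTIFY_OK;
    }

    static struct notifier_block my_bl_nb = {
            .notifier_call = my_bl_notify,
    };

    static void my_client_init(void)
    {
            bL_switcher_register_notifier(&my_bl_nb);
            if (bL_switcher_get_enabled()) {
                    /* switcher state is pinned until the matching put */
                    bL_switcher_put_enabled();
            }
    }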
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4e8217b204a..1fb8d5d41fc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -364,4 +364,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
flush_cache_all();
}
+/*
+ * Memory synchronization helpers for mixed cached vs. non-cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
+ * operation, is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, to prevent a cached writer from interfering with an adjacent
+ * non-cached writer, each state variable must be allocated to its own
+ * cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area, but we use the name anyway
+ * for clarity of intent and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+ __cpuc_clean_dcache_area(_p, size);
+ outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+ char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+ if (outer_cache.flush_range) {
+ /*
+ * Ensure dirty data migrated from other CPUs into our cache
+ * are cleaned out safely before the outer cache is cleaned:
+ */
+ __cpuc_clean_dcache_area(_p, size);
+
+ /* Clean and invalidate stale data for *p from outer ... */
+ outer_flush_range(__pa(_p), __pa(_p + size));
+ }
+#endif
+
+ /* ... and inner cache: */
+ __cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
#endif
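
As an illustration of the intended usage, a sketch assuming a hypothetical state variable shared with a CPU whose cache may be off:

    /* Hypothetical shared flag, one cache line to itself. */
    static int cluster_state __aligned(__CACHE_WRITEBACK_GRANULE);

    static void publish_state(int state)
    {
            cluster_state = state;
            sync_cache_w(&cluster_state);   /* push the write to main memory */
    }

    static int read_peer_state(void)
    {
            sync_cache_r(&cluster_state);   /* discard any stale cached copy */
            return cluster_state;
    }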
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1d8d6..4f009c10540 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
atomic64_t, \
counter), \
- (unsigned long)(o), \
- (unsigned long)(n)))
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
local64_t, \
a), \
- (unsigned long)(o), \
- (unsigned long)(n)))
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
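
The widened casts matter because unsigned long is 32 bits on ARM; a sketch of the failure mode the change prevents:

    /* With the old cast, the upper word of a 64-bit operand was lost: */
    u64 old = 0x100000001ULL;
    /*
     * (unsigned long)old == 0x00000001 on ARM, so cmpxchg64() would have
     * compared against a truncated value; (unsigned long long) keeps all
     * 64 bits intact.
     */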
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a6..cedd3721318 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
#define vectors_high() (0)
#endif
+#ifdef CONFIG_CPU_CP15
+
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
@@ -59,6 +61,20 @@ static inline void set_cr(unsigned int val)
isb();
}
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val));
+ isb();
+}
+
#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
@@ -82,6 +98,18 @@ static inline void set_copro_access(unsigned int val)
isb();
}
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment UL(0)
+#define cr_alignment UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
#endif
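
A typical read-modify-write with the new accessors might look like this (sketch only; which AUXCR bits exist is CPU-specific, and bit 6 is merely an example):

    static void enable_aux_feature(void)
    {
            unsigned int aux = get_auxcr();

            set_auxcr(aux | (1 << 6));      /* set_auxcr() already issues isb() */
    }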
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e..7652712d1d1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_INTEL 0x69
+
+#define ARM_CPU_PART_ARM1136 0xB360
+#define ARM_CPU_PART_ARM1156 0xB560
+#define ARM_CPU_PART_ARM1176 0xB760
+#define ARM_CPU_PART_ARM11MPCORE 0xB020
+#define ARM_CPU_PART_CORTEX_A8 0xC080
+#define ARM_CPU_PART_CORTEX_A9 0xC090
+#define ARM_CPU_PART_CORTEX_A5 0xC050
+#define ARM_CPU_PART_CORTEX_A15 0xC0F0
+#define ARM_CPU_PART_CORTEX_A7 0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
: "cc"); \
__val; \
})
+
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
: "cc"); \
__val; \
})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
-#define ARM_CPU_IMP_ARM 0x41
-#define ARM_CPU_IMP_INTEL 0x69
+#else /* ifdef CONFIG_CPU_CP15 */
-#define ARM_CPU_PART_ARM1136 0xB360
-#define ARM_CPU_PART_ARM1156 0xB560
-#define ARM_CPU_PART_ARM1176 0xB760
-#define ARM_CPU_PART_ARM11MPCORE 0xB020
-#define ARM_CPU_PART_CORTEX_A8 0xC080
-#define ARM_CPU_PART_CORTEX_A9 0xC090
-#define ARM_CPU_PART_CORTEX_A5 0xC050
-#define ARM_CPU_PART_CORTEX_A15 0xC0F0
-#define ARM_CPU_PART_CORTEX_A7 0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15, so warn on other usages.
+ */
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
-#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
-#define ARM_CPU_XSCALE_ARCH_V1 0x2000
-#define ARM_CPU_XSCALE_ARCH_V2 0x4000
-#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+#ifdef CONFIG_CPU_CP15
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
return (read_cpuid_id() & 0xFF000000) >> 24;
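
For illustration, the implementor and part constants combine with read_cpuid_id() roughly as follows (a sketch; the 0xFFF0 mask mirrors the MIDR part-number field):

    #include <linux/types.h>
    #include <asm/cputype.h>

    static bool is_cortex_a15(void)
    {
            unsigned int id = read_cpuid_id();

            return ((id >> 24) & 0xFF) == ARM_CPU_IMP_ARM &&
                   (id & 0xFFF0) == ARM_CPU_PART_CORTEX_A15;
    }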
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76d..b6e9f2c108b 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
* ================
*
* We have the following to choose from:
- * arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
- * v5tej_early - ARMv5 with Thumb and Java early abort handler
+ * v5t_early - ARMv5 with Thumb early abort handler
+ * v5tj_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4t_late_abort
+# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4_early_abort
+# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
@@ -63,19 +63,19 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v5tj_early_abort
+# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v5t_early_abort
+# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 3d7351c844a..fe3ea776dc3 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 7
+#define NR_IPI 8
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 3e0857a6248..809203a4b71 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#endif
+
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15d..124623e5ef1 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+#define HSR_DABT_S1PTW (1U << 7)
+#define HSR_DABT_CM (1U << 8)
+#define HSR_DABT_EA (1U << 9)
+
#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e..18d50322a9e 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb..82b4babead2 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
return 1;
}
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+ return &vcpu->arch.regs.usr_regs.ARM_pc;
}
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+ return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
return reg == 15;
}
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+ return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+ switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ default:
+ kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+ return -EFAULT;
+ }
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
#endif /* __ARM_KVM_EMULATE_H__ */
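
Taken together, these accessors let a fault handler decode a data abort without touching the raw HSR; a hedged sketch (decode_mmio_dabt is invented for illustration):

    static void decode_mmio_dabt(struct kvm_vcpu *vcpu)
    {
            bool is_write;
            int len, rd;
            phys_addr_t ipa;

            if (!kvm_vcpu_dabt_isvalid(vcpu))
                    return;                         /* no valid ISS to decode */

            is_write = kvm_vcpu_dabt_iswrite(vcpu);
            len = kvm_vcpu_dabt_get_as(vcpu);       /* 1, 2 or 4 bytes */
            rd = kvm_vcpu_dabt_get_rd(vcpu);

            /* HPFAR supplies IPA bits [39:12]; the page offset is in HxFAR. */
            ipa = kvm_vcpu_get_fault_ipa(vcpu) |
                  (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);

            /* hand {ipa, len, rd, is_write} to the MMIO emulation layer */
    }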
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12..0c4e643d939 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
void *objects[KVM_NR_MEM_OBJS];
};
+struct kvm_vcpu_fault_info {
+ u32 hsr; /* Hyp Syndrome Register */
+ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
+ u32 hpfar; /* Hyp IPA Fault Address Register */
+ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
struct kvm_vcpu_arch {
struct kvm_regs regs;
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
u32 midr;
/* Exception Information */
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
+ struct kvm_vcpu_fault_info fault;
/* Floating point registers (VFP and Advanced SIMD/NEON) */
- struct vfp_hard_struct vfp_guest;
- struct vfp_hard_struct *vfp_host;
+ kvm_kernel_vfp_t vfp_guest;
+ kvm_kernel_vfp_t *vfp_host;
/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
/* Interrupt related fields */
u32 irq_lines; /* IRQ and FIQ levels */
- /* Hyp exception information */
- u32 hyp_pc; /* PC when exception was taken from Hyp mode */
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -181,4 +185,26 @@ struct kvm_one_reg;
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+{
+ unsigned long pgd_low, pgd_high;
+
+ pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+ pgd_high = (pgd_ptr >> 32ULL);
+
+ /*
+ * Call initialization code, and switch to the full blown
+ * HYP code. The init code doesn't need to preserve these registers as
+ * r1-r3 and r12 are caller-save scratch registers per the AAPCS.
+ * Note that we slightly misuse the prototype by casting pgd_low to
+ * a void *.
+ */
+ kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b3487..970f3b5fa10 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK (~0UL)
+#define HYP_PAGE_OFFSET PAGE_OFFSET
+#define KERN_TO_HYP(kva) (kva)
+
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+ pte_val(*pte) = new_pte;
+ /*
+ * flush_pmd_entry just takes a void pointer and cleans the necessary
+ * cache entries, so we can reuse the function for ptes.
+ */
+ flush_pmd_entry(pte);
+}
+
static inline bool kvm_is_write_fault(unsigned long hsr)
{
unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
return true;
}
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+ clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+ clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+ clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+ pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+ * (or another VM) may have used the same page as this guest, and we
+ * read incorrect data from the icache. If we're using a PIPT cache,
+ * we can invalidate just that page, but if we are using a VIPT cache
+ * we need to invalidate the entire icache - damn shame - as written
+ * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+ *
+ * VIVT caches are tagged using both the ASID and the VMID and don't
+ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ */
+ if (icache_is_pipt()) {
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+ } else if (!icache_is_vivt_asid_tagged()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+}
+
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd..343744e4809 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/kvm.h>
-#include <linux/kvm_host.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d6f98..75bf07910b8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
#ifndef __ASSEMBLY__
struct tag;
@@ -16,8 +18,10 @@ struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
#define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
#else
#define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
#endif
struct machine_desc {
@@ -41,6 +45,7 @@ struct machine_desc {
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
struct smp_operations *smp; /* SMP operations */
+ bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 18c88302333..2092ee1e130 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -20,11 +20,6 @@ struct seq_file;
extern void init_FIQ(int);
extern int show_fiq_list(struct seq_file *, int);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
/*
* This is for easy migration, but should be changed in the source
*/
@@ -35,35 +30,4 @@ do { \
raw_spin_unlock(&desc->lock); \
} while(0)
-#ifndef __ASSEMBLY__
-/*
- * Entry/exit functions for chained handlers where the primary IRQ chip
- * may implement either fasteoi or level-trigger flow control.
- */
-static inline void chained_irq_enter(struct irq_chip *chip,
- struct irq_desc *desc)
-{
- /* FastEOI controllers require no action on entry. */
- if (chip->irq_eoi)
- return;
-
- if (chip->irq_mask_ack) {
- chip->irq_mask_ack(&desc->irq_data);
- } else {
- chip->irq_mask(&desc->irq_data);
- if (chip->irq_ack)
- chip->irq_ack(&desc->irq_data);
- }
-}
-
-static inline void chained_irq_exit(struct irq_chip *chip,
- struct irq_desc *desc)
-{
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
- else
- chip->irq_unmask(&desc->irq_data);
-}
-#endif
-
#endif
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 00000000000..7626a7fd493
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,217 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for some time while keeping the
+ * (assembly) code simpler. When this starts to grow, we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_NR_CLUSTERS 2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU in the given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated. A gated CPU is held in a WFE loop until its vector
+ * becomes non-NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical; if it is 0, nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ * to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended. The expected residency argument is used
+ * as a hint by the platform specific backend to select the appropriate
+ * sleep state level according to its knowledge of the wake-up latency
+ * of the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+ int (*power_up)(unsigned int cpu, unsigned int cluster);
+ void (*power_down)(void);
+ void (*suspend)(u64);
+ void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+ /* individual CPU states */
+ struct {
+ s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+ } cpus[MAX_CPUS_PER_CLUSTER];
+
+ /* cluster state */
+ s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+ /* inbound-side state */
+ s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+ struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys; /* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN 0x11
+#define CPU_COMING_UP 0x12
+#define CPU_UP 0x13
+#define CPU_GOING_DOWN 0x14
+
+#define CLUSTER_DOWN 0x21
+#define CLUSTER_UP 0x22
+#define CLUSTER_GOING_DOWN 0x23
+
+#define INBOUND_NOT_COMING_UP 0x31
+#define INBOUND_COMING_UP 0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS 0
+#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+ (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+ (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+ (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
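
A backend hooks into this API roughly as follows (a sketch of a hypothetical platform driver; all my_* names are invented):

    #include <asm/mcpm.h>

    static int my_power_up(unsigned int cpu, unsigned int cluster)
    {
            /* deassert reset for the CPU, powering the cluster up if needed */
            return 0;
    }

    static void my_power_down(void)
    {
            /* clean caches, exit coherency, WFI; must not return in real code */
    }

    static void my_suspend(u64 expected_residency)
    {
            /* choose a sleep state based on expected_residency */
    }

    static void my_powered_up(void)
    {
            /* housekeeping on the freshly woken CPU */
    }

    static const struct mcpm_platform_ops my_ops = {
            .power_up       = my_power_up,
            .power_down     = my_power_down,
            .suspend        = my_suspend,
            .powered_up     = my_powered_up,
    };

    static int __init my_mcpm_init(void)
    {
            int ret = mcpm_platform_register(&my_ops);

            if (!ret)
                    mcpm_smp_set_ops();     /* route SMP bring-up through MCPM */
            return ret;
    }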
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a..209e6504922 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- /* Read TPIDRPRW */
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * Read TPIDRPRW.
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
return off;
}
#define __my_cpu_offset __my_cpu_offset()
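
The fake "Q" (*sp) operand ties the mrc to memory without making the asm volatile: the result may be cached in a register across ordinary code, yet a barrier() still forces a re-read. Conceptually:

    unsigned long a = __my_cpu_offset;      /* may stay cached in a register */
    barrier();                              /* "memory" clobber ... */
    unsigned long b = __my_cpu_offset;      /* ... so this re-reads TPIDRPRW */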
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4..9bcd262a900 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define FIRST_USER_ADDRESS PAGE_SIZE
/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING TASK_SIZE
+#endif
+
+/*
* The pgprot_* and protection_map entries will be fixed up in runtime
* to include the cachable and bufferable bits based on memory policy,
* as well as any architecture dependent bits like global/ASID and SMP
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c7..a7eaad37497 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,9 @@
#define __ARM_PMU_H__
#include <linux/interrupt.h>
+#include <linux/percpu.h>
#include <linux/perf_event.h>
+#include <linux/types.h>
/*
* struct arm_pmu_platdata - ARM PMU platform data
@@ -62,9 +64,34 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
+struct arm_cpu_pmu {
+ bool valid;
+ bool active;
+
+ u32 mpidr;
+ int irq;
+
+ struct perf_event *hw_events[ARMPMU_MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+ struct pmu_hw_events cpu_hw_events;
+ struct cpupmu_regs cpu_pmu_regs;
+
+ void *logical_state;
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
+ cpumask_t valid_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -81,16 +108,26 @@ struct arm_pmu {
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*cpu_init)(struct arm_pmu *, struct arm_cpu_pmu *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
- struct pmu_hw_events *(*get_hw_events)(void);
+ struct pmu_hw_events *(*get_hw_events)(struct arm_pmu *);
+
+ struct list_head class_pmus_list;
+ struct arm_cpu_pmu __percpu *cpu_pmus;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+#define for_each_pmu(pmu, head) list_for_each_entry(pmu, head, class_pmus_list)
+
+#define to_this_cpu_pmu(arm_pmu) this_cpu_ptr((arm_pmu)->cpu_pmus)
+
extern const struct dev_pm_ops armpmu_dev_pm_ops;
int armpmu_register(struct arm_pmu *armpmu, int type);
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index ce0dbe7c162..a079cbee427 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -16,6 +16,10 @@
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL0 0
+#define PSCI_POWER_STATE_AFFINITY_LEVEL1 1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL2 2
+#define PSCI_POWER_STATE_AFFINITY_LEVEL3 3
struct psci_power_state {
u16 id;
@@ -33,4 +37,12 @@ struct psci_operations {
extern struct psci_operations psci_ops;
+#ifdef CONFIG_ARM_PSCI
+extern int __init psci_probe(void);
+#else
+static inline int psci_probe(void)
+{
+ return -ENODEV;
+}
+#endif
#endif /* __ASM_ARM_PSCI_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index c5aa088c0a8..7a9cc0948a3 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -81,6 +81,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
extern void smp_send_all_cpu_backtrace(void);
struct smp_operations {
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd..983fa7c153a 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -26,11 +26,45 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline int cluster_to_logical_mask(unsigned int socket_id,
+ cpumask_t *cluster_mask) { return -EINVAL; }
#endif
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367b..c1ee007523d 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
#define KVM_ARM_FIQ_spsr fiq_regs[7]
struct kvm_regs {
- struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
- __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
- __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
- __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
- __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
- __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
+ struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
+ unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
+ unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
+ unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
+ unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
+ unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
};
/* Supported Processor Types */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 923eec7105c..a53efa99369 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -149,6 +149,10 @@ int main(void)
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ BLANK();
+ DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
+ DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
+ BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
- DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
- DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
- DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
- DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
+ DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
+ DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
+ DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
+ DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 854bd22380d..5b391a689b4 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -98,8 +98,9 @@ __mmap_switched:
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
- bic r4, r0, #CR_A @ Clear 'A' bit
- stmia r7, {r0, r4} @ Save control register values
+ cmp r7, #0
+ bicne r4, r0, #CR_A @ Clear 'A' bit
+ stmneia r7, {r0, r4} @ Save control register values
b start_kernel
ENDPROC(__mmap_switched)
@@ -113,7 +114,11 @@ __mmap_switched_data:
.long processor_id @ r4
.long __machine_arch_type @ r5
.long __atags_pointer @ r6
+#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r7
+#else
+ .long 0 @ r7
+#endif
.long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2c228a07e58..6a2e09c952c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,15 +32,21 @@
* numbers for r1.
*
*/
- .arm
__HEAD
+
+#ifdef CONFIG_CPU_THUMBONLY
+ .thumb
+ENTRY(stext)
+#else
+ .arm
ENTRY(stext)
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
+#endif
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1..1b803117ed9 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1049,7 +1049,8 @@ static struct notifier_block dbg_cpu_pm_nb = {
static void __init pm_init(void)
{
- cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+ if (has_ossr)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b..3d753cc4aa0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -81,6 +82,9 @@ armpmu_map_event(struct perf_event *event,
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
+ default:
+ if (event->attr.type >= PERF_TYPE_MAX)
+ return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
@@ -158,6 +162,8 @@ armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
@@ -174,6 +180,8 @@ static void armpmu_start(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -197,10 +205,13 @@ static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
@@ -212,11 +223,15 @@ static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return 0;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
@@ -416,6 +431,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -448,8 +467,14 @@ static int armpmu_event_init(struct perf_event *event)
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
- int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events(armpmu);
+ int enabled;
+
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
+ BUG_ON(!hw_events->used_mask); /* TEMPORARY */
+ enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
armpmu->start(armpmu);
@@ -458,6 +483,10 @@ static void armpmu_enable(struct pmu *pmu)
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
armpmu->stop(armpmu);
}
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc..b3ae24f6afa 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,23 +19,40 @@
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/of.h>
+#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <asm/bL_switcher.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
+#include <asm/smp_plat.h>
+#include <asm/topology.h>
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
+static LIST_HEAD(cpu_pmus_list);
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+#define cpu_for_each_pmu(pmu, cpu_pmu, cpu) \
+ for_each_pmu(pmu, &cpu_pmus_list) \
+ if (((cpu_pmu) = per_cpu_ptr((pmu)->cpu_pmus, cpu))->valid)
+
+static struct arm_pmu *__cpu_find_any_pmu(unsigned int cpu)
+{
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+
+ cpu_for_each_pmu(pmu, cpu_pmu, cpu)
+ return pmu;
+
+ return NULL;
+}
/*
* Despite the names, these two functions are CPU-specific and are used
@@ -43,21 +60,22 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
*/
const char *perf_pmu_name(void)
{
- if (!cpu_pmu)
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
+ if (!pmu)
return NULL;
- return cpu_pmu->name;
+ return pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
- int max_events = 0;
+ struct arm_pmu *pmu = __cpu_find_any_pmu(0);
- if (cpu_pmu != NULL)
- max_events = cpu_pmu->num_events;
+ if (!pmu)
+ return 0;
- return max_events;
+ return pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
@@ -66,89 +84,125 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#include "perf_event_v6.c"
#include "perf_event_v7.c"
-static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(struct arm_pmu *pmu)
{
- return &__get_cpu_var(cpu_hw_events);
+ return &this_cpu_ptr(pmu->cpu_pmus)->cpu_hw_events;
}
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+static int find_logical_cpu(u32 mpidr)
{
- int i, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = bL_switcher_get_logical_index(mpidr);
- irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (cpu != -EUNATCH)
+ return cpu;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+ return get_logical_index(mpidr);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *pmu)
+{
+ int i;
+ int cpu;
+ struct arm_cpu_pmu *cpu_pmu;
+
+ for_each_possible_cpu(i) {
+ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, cpu_pmu);
+
+ if (cpu_pmu->mpidr == -1)
+ continue;
+
+ cpu = find_logical_cpu(cpu_pmu->mpidr);
+ if (cpu < 0)
+ continue;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &pmu->active_irqs))
+ continue;
+ if (cpu_pmu->irq >= 0)
+ free_irq(cpu_pmu->irq, pmu);
}
}
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+static int cpu_pmu_request_irq(struct arm_pmu *pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu;
+ struct arm_cpu_pmu *cpu_pmu;
- if (!pmu_device)
- return -ENODEV;
+ irqs = 0;
+ for_each_possible_cpu(i)
+ if (per_cpu_ptr(pmu->cpu_pmus, i))
+ ++irqs;
- irqs = min(pmu_device->num_resources, num_possible_cpus());
if (irqs < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
+ for_each_possible_cpu(i) {
+ if (!(cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, i)))
+ continue;
+
+ irq = cpu_pmu->irq;
if (irq < 0)
continue;
+ cpu = find_logical_cpu(cpu_pmu->mpidr);
+ if (cpu < 0 || cpu != i)
+ continue;
+
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
+ irq, cpu);
continue;
}
+ pr_debug("%s: requesting IRQ %d for CPU%d\n",
+ pmu->name, irq, cpu);
+
err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
- cpu_pmu);
+ pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
return err;
}
- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+ cpumask_set_cpu(cpu, &pmu->active_irqs);
}
return 0;
}
-static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+static void cpu_pmu_init(struct arm_pmu *pmu)
{
int cpu;
- for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
- events->events = per_cpu(hw_events, cpu);
- events->used_mask = per_cpu(used_mask, cpu);
+ for_each_cpu_mask(cpu, pmu->valid_cpus) {
+ struct arm_cpu_pmu *cpu_pmu = per_cpu_ptr(pmu->cpu_pmus, cpu);
+ struct pmu_hw_events *events = &cpu_pmu->cpu_hw_events;
+
+ events->events = cpu_pmu->hw_events;
+ events->used_mask = cpu_pmu->used_mask;
raw_spin_lock_init(&events->pmu_lock);
+
+ if (pmu->cpu_init)
+ pmu->cpu_init(pmu, cpu_pmu);
+
+ cpu_pmu->valid = true;
}
- cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
- cpu_pmu->request_irq = cpu_pmu_request_irq;
- cpu_pmu->free_irq = cpu_pmu_free_irq;
+ pmu->get_hw_events = cpu_pmu_get_cpu_events;
+ pmu->request_irq = cpu_pmu_request_irq;
+ pmu->free_irq = cpu_pmu_free_irq;
/* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu->reset)
- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+ if (pmu->reset)
+ on_each_cpu_mask(&pmu->valid_cpus, pmu->reset, pmu, 1);
}
/*
@@ -160,21 +214,52 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+ int ret = NOTIFY_DONE;
+
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(cpu_pmu);
- else
- return NOTIFY_DONE;
+ cpu_for_each_pmu(pmu, cpu_pmu, (unsigned int)hcpu)
+ if (pmu->reset) {
+ pmu->reset(pmu);
+ ret = NOTIFY_OK;
+ }
- return NOTIFY_OK;
+ return ret;
+}
+
+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct arm_pmu *pmu;
+ struct arm_cpu_pmu *cpu_pmu;
+ int ret = NOTIFY_DONE;
+
+ cpu_for_each_pmu(pmu, cpu_pmu, cpu) {
+ struct cpupmu_regs *pmuregs = &cpu_pmu->cpu_pmu_regs;
+
+ if (action == CPU_PM_ENTER && pmu->save_regs)
+ pmu->save_regs(pmu, pmuregs);
+ else if (action == CPU_PM_EXIT && pmu->restore_regs)
+ pmu->restore_regs(pmu, pmuregs);
+
+ ret = NOTIFY_OK;
+ }
+
+ return ret;
}
static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};
+static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -246,52 +331,197 @@ static int probe_current_pmu(struct arm_pmu *pmu)
}
}
+ /* assume the PMU supports all the CPUs in this case */
+ cpumask_setall(&pmu->valid_cpus);
+
put_cpu();
return ret;
}
+static void cpu_pmu_free(struct arm_pmu *pmu)
+{
+ if (!pmu)
+ return;
+
+ free_percpu(pmu->cpu_pmus);
+ kfree(pmu);
+}
+
+/*
+ * HACK: Find a b.L switcher partner for CPU cpu on the specified cluster.
+ * This information should be obtained from an interface provided by the
+ * Switcher itself, if possible.
+ */
+#ifdef CONFIG_BL_SWITCHER
+static int bL_get_partner(int cpu, int cluster)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ if (cpu_topology[i].thread_id == cpu_topology[cpu].thread_id &&
+ cpu_topology[i].core_id == cpu_topology[cpu].core_id &&
+ cpu_topology[i].socket_id == cluster)
+ return i;
+ }
+
+ return -1; /* no partner found */
+}
+#else
+static int bL_get_partner(int __always_unused cpu, int __always_unused cluster)
+{
+ return -1;
+}
+#endif
+
+static int find_irq(struct platform_device *pdev,
+ struct device_node *pmu_node,
+ struct device_node *cluster_node,
+ u32 mpidr)
+{
+ int irq = -1;
+ u32 cluster;
+ u32 core;
+ struct device_node *cores_node;
+ struct device_node *core_node = NULL;
+
+ if (of_property_read_u32(cluster_node, "reg", &cluster) ||
+ cluster != MPIDR_AFFINITY_LEVEL(mpidr, 1))
+ goto error;
+
+ cores_node = of_get_child_by_name(cluster_node, "cores");
+ if (!cores_node)
+ goto error;
+
+ for_each_child_of_node(cores_node, core_node)
+ if (!of_property_read_u32(core_node, "reg", &core) &&
+ core == MPIDR_AFFINITY_LEVEL(mpidr, 0))
+ break;
+
+ if (!core_node)
+ goto error;
+
+ irq = platform_get_irq(pdev, core);
+
+error:
+ of_node_put(core_node);
+ of_node_put(cores_node);
+ return irq;
+}
+
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- if (cpu_pmu) {
- pr_info("attempt to register multiple PMU devices!");
- return -ENOSPC;
- }
+ struct arm_cpu_pmu __percpu *cpu_pmus;
+ int ret = 0;
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
- if (!pmu) {
- pr_info("failed to allocate PMU device!");
- return -ENOMEM;
- }
+ if (!pmu)
+ goto error_nomem;
+
+ pmu->cpu_pmus = cpu_pmus = alloc_percpu(struct arm_cpu_pmu);
+ if (!cpu_pmus)
+ goto error_nomem;
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
- init_fn = of_id->data;
- ret = init_fn(pmu);
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+ cpumask_t phys_sibling_mask;
+ unsigned int i;
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+ int len;
+ const u32 *hwid;
+ hwid = of_get_property(ncluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set the sibling mask to all CPUs if the socket is not specified */
+ /*
+ * In a switcher kernel, we affine all PMUs to CPUs and
+ * abstract the runtime presence/absence of PMUs at a lower
+ * level.
+ */
+ if (cluster == -1 || IS_ENABLED(CONFIG_BL_SWITCHER) ||
+ cluster_to_logical_mask(cluster, &sibling_mask))
+ cpumask_copy(&sibling_mask, cpu_possible_mask);
+
+ if (bL_switcher_get_enabled())
+ /*
+ * The switcher initialises late now, so it should not
+ * have initialised yet:
+ */
+ BUG();
+
+ cpumask_copy(&phys_sibling_mask, cpu_possible_mask);
+
+ /*
+ * HACK: Deduce how the switcher will modify the topology
+ * in order to fill in PMU<->CPU combinations which don't
+ * make sense when the switcher is disabled. Ideally, this
+ * knowledge should come from the switcher somehow.
+ */
+ for_each_possible_cpu(i) {
+ int cpu = i;
+
+ per_cpu_ptr(cpu_pmus, i)->mpidr = -1;
+ per_cpu_ptr(cpu_pmus, i)->irq = -1;
+
+ if (cpu_topology[i].socket_id != cluster) {
+ cpumask_clear_cpu(i, &phys_sibling_mask);
+ cpu = bL_get_partner(i, cluster);
+ }
+
+ if (cpu == -1)
+ cpumask_clear_cpu(i, &sibling_mask);
+ else {
+ int irq = find_irq(pdev, node, ncluster,
+ cpu_logical_map(cpu));
+ per_cpu_ptr(cpu_pmus, i)->mpidr =
+ cpu_logical_map(cpu);
+ per_cpu_ptr(cpu_pmus, i)->irq = irq;
+ }
+ }
+
+ /*
+ * This relies on an MP view of the system to choose the right
+ * CPU to run init_fn:
+ */
+ smp_call_function_any(&phys_sibling_mask, init_fn, pmu, 1);
+
+ bL_switcher_put_enabled();
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
} else {
ret = probe_current_pmu(pmu);
}
- if (ret) {
- pr_info("failed to probe PMU!");
- goto out_free;
- }
+ if (ret)
+ goto error;
- cpu_pmu = pmu;
- cpu_pmu->plat_device = pdev;
- cpu_pmu_init(cpu_pmu);
- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ ret = armpmu_register(pmu, -1);
- if (!ret)
- return 0;
+ if (ret)
+ goto error;
-out_free:
- pr_info("failed to register PMU devices!");
- kfree(pmu);
+ list_add(&pmu->class_pmus_list, &cpu_pmus_list);
+ goto out;
+
+error_nomem:
+ pr_warn("out of memory\n");
+ ret = -ENOMEM;
+error:
+ pr_warn("failed to register PMU device(s)!\n");
+ cpu_pmu_free(pmu);
+out:
return ret;
}
@@ -313,9 +543,17 @@ static int __init register_pmu_driver(void)
if (err)
return err;
+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
err = platform_driver_register(&cpu_pmu_driver);
- if (err)
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }
return err;
}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 03664b0e8fa..a191bdb9ebd 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -439,7 +439,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -477,7 +477,7 @@ armv6pmu_handle_irq(int irq_num,
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -533,7 +533,7 @@ armv6pmu_handle_irq(int irq_num,
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -545,7 +545,7 @@ static void armv6pmu_start(struct arm_pmu *cpu_pmu)
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -586,7 +586,7 @@ static void armv6pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -621,7 +621,7 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
unsigned long val, mask, flags, evt = 0;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 039cffb053a..25762a548f2 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,6 +18,175 @@
#ifdef CONFIG_CPU_V7
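+/*
+ * Shadow ("logical") copy of the v7 PMU register file. While the
+ * physical PMU is not owned by this logical CPU (e.g. after a b.L
+ * switch-over), reads and writes are emulated against this state.
+ */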
+struct armv7_pmu_logical_state {
+ u32 PMCR;
+ u32 PMCNTENSET;
+ u32 PMCNTENCLR;
+ u32 PMOVSR;
+ u32 PMSWINC;
+ u32 PMSELR;
+ u32 PMCEID0;
+ u32 PMCEID1;
+
+ u32 PMCCNTR;
+
+ u32 PMUSERENR;
+ u32 PMINTENSET;
+ u32 PMINTENCLR;
+ u32 PMOVSSET;
+
+ struct armv7_pmu_logical_cntr_state {
+ u32 PMXEVTYPER;
+ u32 PMXEVCNTR;
+ } cntrs[1]; /* we will grow this during allocation */
+};
+
+#define __v7_logical_state(cpupmu) \
+ ((struct armv7_pmu_logical_state *)(cpupmu)->logical_state)
+
+#define __v7_logical_state_single(cpupmu, name) \
+ __v7_logical_state(cpupmu)->name
+#define __v7_logical_state_cntr(cpupmu, name) \
+ __v7_logical_state(cpupmu)->cntrs[__v7_logical_state(cpupmu)->PMSELR].name
+
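+/*
+ * Generate paired accessors for each v7 PMU register: a "physical"
+ * variant issuing the mcr/mrc on CP15 c9, and a "logical" variant
+ * accessing the shadow copy in armv7_pmu_logical_state. "kind"
+ * selects between plain registers and the per-counter PMXEV*
+ * registers banked by the shadowed PMSELR value.
+ */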
+#define __def_v7_pmu_reg_W(kind, name, op1, Cm, op2) \
+ static inline u32 __v7_pmu_write_physical_##name(u32 value) \
+ { \
+ asm volatile ( \
+ "mcr p15, " #op1 ", %0, c9, " #Cm ", " #op2 \
+ :: "r" (value) \
+ ); \
+ \
+ return value; \
+ } \
+ \
+ static inline u32 __v7_pmu_write_logical_##name( \
+ struct arm_cpu_pmu *cpupmu, u32 value) \
+ { \
+ __v7_logical_state_##kind(cpupmu, name) = value; \
+ return value; \
+ }
+
+#define __def_v7_pmu_reg_R(kind, name, op1, Cm, op2) \
+ static inline u32 __v7_pmu_read_physical_##name(void) \
+ { \
+ u32 result; \
+ \
+ asm volatile ( \
+ "mrc p15, " #op1 ", %0, c9, " #Cm ", " #op2 \
+ : "=r" (result) \
+ ); \
+ \
+ return result; \
+ } \
+ \
+ static inline u32 __v7_pmu_read_logical_##name( \
+ struct arm_cpu_pmu *cpupmu) \
+ { \
+ return __v7_logical_state_##kind(cpupmu, name); \
+ }
+
+#define __def_v7_pmu_reg_WO(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_W(single, name, op1, Cm, op2)
+#define __def_v7_pmu_reg_RO(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_R(single, name, op1, Cm, op2)
+
+#define __def_v7_pmu_reg_RW(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_WO(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_RO(name, op1, Cm, op2)
+
+#define __def_v7_pmu_cntr_WO(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_W(cntr, name, op1, Cm, op2)
+#define __def_v7_pmu_cntr_RO(name, op1, Cm, op2) \
+ __def_v7_pmu_reg_R(cntr, name, op1, Cm, op2)
+
+#define __def_v7_pmu_cntr_RW(name, op1, Cm, op2) \
+ __def_v7_pmu_cntr_WO(name, op1, Cm, op2) \
+ __def_v7_pmu_cntr_RO(name, op1, Cm, op2)
+
+#define __def_v7_pmu_reg(name, prot, op1, Cm, op2) \
+ __def_v7_pmu_reg_##prot(name, op1, Cm, op2)
+#define __def_v7_pmu_cntr(name, prot, op1, Cm, op2) \
+ __def_v7_pmu_cntr_##prot(name, op1, Cm, op2)
+
+__def_v7_pmu_reg(PMCR, RW, 0, c12, 0)
+__def_v7_pmu_reg(PMCNTENSET, RW, 0, c12, 1)
+__def_v7_pmu_reg(PMCNTENCLR, RW, 0, c12, 2)
+__def_v7_pmu_reg(PMOVSR, RW, 0, c12, 3)
+__def_v7_pmu_reg(PMSWINC, WO, 0, c12, 4)
+__def_v7_pmu_reg(PMSELR, RW, 0, c12, 5)
+__def_v7_pmu_reg(PMCEID0, RO, 0, c12, 6)
+__def_v7_pmu_reg(PMCEID1, RO, 0, c12, 7)
+
+__def_v7_pmu_reg(PMCCNTR, RW, 0, c13, 0)
+__def_v7_pmu_cntr(PMXEVTYPER, RW, 0, c13, 1)
+__def_v7_pmu_cntr(PMXEVCNTR, RW, 0, c13, 2)
+
+__def_v7_pmu_reg(PMUSERENR, RW, 0, c14, 0)
+__def_v7_pmu_reg(PMINTENSET, RW, 0, c14, 1)
+__def_v7_pmu_reg(PMINTENCLR, RW, 0, c14, 2)
+__def_v7_pmu_reg(PMOVSSET, RW, 0, c14, 3)
+
+#define __v7_pmu_write_physical(name, value) \
+ __v7_pmu_write_physical_##name(value)
+#define __v7_pmu_read_physical(name) \
+ __v7_pmu_read_physical_##name()
+
+#define __v7_pmu_write_logical(cpupmu, name, value) \
+ __v7_pmu_write_logical_##name(cpupmu, value)
+#define __v7_pmu_read_logical(cpupmu, name) \
+ __v7_pmu_read_logical_##name(cpupmu)
+
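+/*
+ * Register accessors: while the PMU is "active" (physically owned by
+ * this CPU), accesses go straight to the hardware; otherwise they are
+ * redirected to the logical shadow state so that nothing is lost while
+ * the switcher has migrated us away.
+ */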
+#define __v7_pmu_write_reg(cpupmu, name, value) do { \
+ if ((cpupmu)->active) \
+ __v7_pmu_write_physical(name, value); \
+ else \
+ __v7_pmu_write_logical(cpupmu, name, value); \
+} while (0)
+
+#define __v7_pmu_read_reg(cpupmu, name) ( \
+ (cpupmu)->active ? \
+ __v7_pmu_read_physical(name) : \
+ __v7_pmu_read_logical(cpupmu, name) \
+)
+
+#define __v7_pmu_reg_set(cpupmu, name, logical_name, mask) do { \
+ if ((cpupmu)->active) \
+ __v7_pmu_write_physical(name, mask); \
+ else { \
+ u32 __value; \
+ __value = __v7_pmu_read_logical(cpupmu, logical_name) | (mask); \
+ __v7_pmu_write_logical(cpupmu, logical_name, __value); \
+ } \
+} while (0)
+
+#define __v7_pmu_reg_clr(cpupmu, name, logical_name, mask) do { \
+ if ((cpupmu)->active) \
+ __v7_pmu_write_physical(name, mask); \
+ else { \
+ u32 __value; \
+ __value = __v7_pmu_read_logical(cpupmu, logical_name) & ~(mask); \
+ __v7_pmu_write_logical(cpupmu, logical_name, __value); \
+ } \
+} while (0)
+
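+/*
+ * Save copies a register from the hardware into the shadow state;
+ * restore writes the shadowed value back to the hardware.
+ */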
+#define __v7_pmu_save_reg(cpupmu, name) \
+ __v7_pmu_write_logical(cpupmu, name, \
+ __v7_pmu_read_physical(name))
+#define __v7_pmu_restore_reg(cpupmu, name) \
+ __v7_pmu_write_physical(name, \
+ __v7_pmu_read_logical(cpupmu, name))
+static u32 read_mpidr(void)
+{
+ u32 result;
+
+ asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (result));
+
+ return result;
+}
+
+static void armv7pmu_reset(void *info);
+
/*
* Common ARMv7 event types
*
@@ -784,18 +953,16 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
#define ARMV7_EXCLUDE_USER (1 << 30)
#define ARMV7_INCLUDE_HYP (1 << 27)
-static inline u32 armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(struct arm_cpu_pmu *cpupmu)
{
- u32 val;
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
- return val;
+ return __v7_pmu_read_reg(cpupmu, PMCR);
}
-static inline void armv7_pmnc_write(u32 val)
+static inline void armv7_pmnc_write(struct arm_cpu_pmu *cpupmu, u32 val)
{
val &= ARMV7_PMNC_MASK;
isb();
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+ __v7_pmu_write_reg(cpupmu, PMCR, val);
}
static inline int armv7_pmnc_has_overflowed(u32 pmnc)
@@ -814,10 +981,10 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}
-static inline int armv7_pmnc_select_counter(int idx)
+static inline int armv7_pmnc_select_counter(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
+ __v7_pmu_write_reg(cpupmu, PMSELR, counter);
isb();
return idx;
@@ -825,140 +992,197 @@ static inline int armv7_pmnc_select_counter(int idx)
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *pmu = to_arm_pmu(event->pmu);
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u32 value = 0;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
+ if (!armv7_pmnc_counter_valid(pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
+ value = __v7_pmu_read_reg(cpupmu, PMCCNTR);
+ else if (armv7_pmnc_select_counter(cpupmu, idx) == idx)
+ value = __v7_pmu_read_reg(cpupmu, PMXEVCNTR);
return value;
}
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *pmu = to_arm_pmu(event->pmu);
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
+ if (!armv7_pmnc_counter_valid(pmu, idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+ __v7_pmu_write_reg(cpupmu, PMCCNTR, value);
+ else if (armv7_pmnc_select_counter(cpupmu, idx) == idx)
+ __v7_pmu_write_reg(cpupmu, PMXEVCNTR, value);
}
-static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(struct arm_cpu_pmu *cpupmu, int idx, u32 val)
{
- if (armv7_pmnc_select_counter(idx) == idx) {
+ if (armv7_pmnc_select_counter(cpupmu, idx) == idx) {
val &= ARMV7_EVTYPE_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+ __v7_pmu_write_reg(cpupmu, PMXEVTYPER, val);
}
}
-static inline int armv7_pmnc_enable_counter(int idx)
+static inline int armv7_pmnc_enable_counter(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
+ __v7_pmu_reg_set(cpupmu, PMCNTENSET, PMCNTENSET, BIT(counter));
return idx;
}
-static inline int armv7_pmnc_disable_counter(int idx)
+static inline int armv7_pmnc_disable_counter(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
+ __v7_pmu_reg_clr(cpupmu, PMCNTENCLR, PMCNTENSET, BIT(counter));
return idx;
}
-static inline int armv7_pmnc_enable_intens(int idx)
+static inline int armv7_pmnc_enable_intens(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
+ __v7_pmu_reg_set(cpupmu, PMINTENSET, PMINTENSET, BIT(counter));
return idx;
}
-static inline int armv7_pmnc_disable_intens(int idx)
+static inline int armv7_pmnc_disable_intens(struct arm_cpu_pmu *cpupmu, int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ __v7_pmu_reg_clr(cpupmu, PMINTENCLR, PMINTENSET, BIT(counter));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ __v7_pmu_reg_clr(cpupmu, PMOVSR, PMOVSR, BIT(counter));
isb();
return idx;
}
-static inline u32 armv7_pmnc_getreset_flags(void)
+static inline u32 armv7_pmnc_getreset_flags(struct arm_cpu_pmu *cpupmu)
{
u32 val;
/* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+ val = __v7_pmu_read_reg(cpupmu, PMOVSR);
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+ __v7_pmu_reg_clr(cpupmu, PMOVSR, PMOVSR, val);
return val;
}
#ifdef DEBUG
-static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
+static void armv7_pmnc_dump_regs(struct arm_pmu *pmu)
{
u32 val;
unsigned int cnt;
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
printk(KERN_INFO "PMNC registers dump:\n");
+ printk(KERN_INFO "PMNC =0x%08x\n", __v7_pmu_read_reg(PMCR));
+ printk(KERN_INFO "CNTENS=0x%08x\n", __v7_pmu_read_reg(PMCNTENSET));
+ printk(KERN_INFO "INTENS=0x%08x\n", __v7_pmu_read_reg(PMINTENSET));
+ printk(KERN_INFO "FLAGS =0x%08x\n", __v7_pmu_read_reg(PMOVSR));
+ printk(KERN_INFO "SELECT=0x%08x\n", __v7_pmu_read_reg(PMSELR));
+ printk(KERN_INFO "CCNT =0x%08x\n", __v7_pmu_read_reg(PMCCNTR));
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
+ armv7_pmnc_select_counter(cpupmu, cnt);
+ printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+ ARMV7_IDX_TO_COUNTER(cnt),
+ __v7_pmu_read_reg(cpupmu, PMXEVCNTR));
+ printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+ ARMV7_IDX_TO_COUNTER(cnt),
+ __v7_pmu_read_reg(cpupmu, PMXEVTYPER));
+ }
+}
+#endif
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
+static void armv7pmu_save_regs(struct arm_pmu *pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
+ if (!cpupmu->active)
+ return;
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
+ if (!*cpupmu->cpu_hw_events.used_mask)
+ return;
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
+ if (!(__v7_pmu_save_reg(cpupmu, PMCR) & ARMV7_PMNC_E))
+ return;
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
+ __v7_pmu_save_reg(cpupmu, PMCNTENSET);
+ __v7_pmu_save_reg(cpupmu, PMUSERENR);
+ __v7_pmu_save_reg(cpupmu, PMINTENSET);
+ __v7_pmu_save_reg(cpupmu, PMCCNTR);
for (cnt = ARMV7_IDX_COUNTER0;
- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
+ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
+ armv7_pmnc_select_counter(cpupmu, cnt);
+ __v7_pmu_save_reg(cpupmu, PMSELR); /* mirror physical PMSELR */
+ __v7_pmu_save_reg(cpupmu, PMXEVTYPER);
+ __v7_pmu_save_reg(cpupmu, PMXEVCNTR);
}
+}
+
+/* The PMU is reset via armv7pmu_reset() before any state is restored */
+static void armv7pmu_restore_regs(struct arm_pmu *pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ u32 pmcr;
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+
+ armv7pmu_reset(pmu);
+
+ if (!cpupmu->active)
+ return;
+
+ if (!*cpupmu->cpu_hw_events.used_mask)
+ return;
+
+ pmcr = __v7_pmu_read_logical(cpupmu, PMCR);
+ if (!(pmcr & ARMV7_PMNC_E))
+ return;
+
+ __v7_pmu_restore_reg(cpupmu, PMCNTENSET);
+ __v7_pmu_restore_reg(cpupmu, PMUSERENR);
+ __v7_pmu_restore_reg(cpupmu, PMINTENSET);
+ __v7_pmu_restore_reg(cpupmu, PMCCNTR);
+
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(pmu); cnt++) {
+ armv7_pmnc_select_counter(cpupmu, cnt);
+ __v7_pmu_save_reg(cpupmu, PMSELR); /* mirror physical PMSELR */
+ __v7_pmu_restore_reg(cpupmu, PMXEVTYPER);
+ __v7_pmu_restore_reg(cpupmu, PMXEVCNTR);
+ }
+ __v7_pmu_write_reg(cpupmu, PMCR, pmcr);
}
-#endif
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct arm_pmu *pmu = to_arm_pmu(event->pmu);
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ struct pmu_hw_events *events = pmu->get_hw_events(pmu);
int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ if (!armv7_pmnc_counter_valid(pmu, idx)) {
pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return;
@@ -973,25 +1197,25 @@ static void armv7pmu_enable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv7_pmnc_disable_counter(idx);
+ armv7_pmnc_disable_counter(cpupmu, idx);
/*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
*/
- if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
+ if (pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+ armv7_pmnc_write_evtsel(cpupmu, idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
- armv7_pmnc_enable_intens(idx);
+ armv7_pmnc_enable_intens(cpupmu, idx);
/*
* Enable counter
*/
- armv7_pmnc_enable_counter(idx);
+ armv7_pmnc_enable_counter(cpupmu, idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -1000,11 +1224,12 @@ static void armv7pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct arm_pmu *pmu = to_arm_pmu(event->pmu);
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ struct pmu_hw_events *events = pmu->get_hw_events(pmu);
int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ if (!armv7_pmnc_counter_valid(pmu, idx)) {
pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
smp_processor_id(), idx);
return;
@@ -1018,12 +1243,12 @@ static void armv7pmu_disable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv7_pmnc_disable_counter(idx);
+ armv7_pmnc_disable_counter(cpupmu, idx);
/*
* Disable interrupt for this counter
*/
- armv7_pmnc_disable_intens(idx);
+ armv7_pmnc_disable_intens(cpupmu, idx);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -1032,15 +1257,23 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
u32 pmnc;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct arm_pmu *pmu = (struct arm_pmu *)dev;
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ struct pmu_hw_events *cpuc = pmu->get_hw_events(pmu);
struct pt_regs *regs;
int idx;
+ if (!cpupmu->active) {
+ pr_warn_ratelimited("%s: Spurious interrupt for inactive PMU %s: event counts will be wrong.\n",
+ __func__, pmu->name);
+ pr_warn_once("This is a known interrupt affinity bug in the b.L switcher perf support.\n");
+ return IRQ_NONE;
+ }
+
/*
* Get and reset the IRQ flags
*/
- pmnc = armv7_pmnc_getreset_flags();
+ pmnc = armv7_pmnc_getreset_flags(cpupmu);
/*
* Did an overflow occur?
@@ -1053,7 +1286,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for (idx = 0; idx < pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1075,7 +1308,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ pmu->disable(event);
}
/*
@@ -1090,25 +1323,27 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
return IRQ_HANDLED;
}
-static void armv7pmu_start(struct arm_pmu *cpu_pmu)
+static void armv7pmu_start(struct arm_pmu *pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ struct pmu_hw_events *events = pmu->get_hw_events(pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
- armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+ armv7_pmnc_write(cpupmu, armv7_pmnc_read(cpupmu) | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
+static void armv7pmu_stop(struct arm_pmu *pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ struct pmu_hw_events *events = pmu->get_hw_events(pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
- armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+ armv7_pmnc_write(cpupmu, armv7_pmnc_read(cpupmu) & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -1167,19 +1402,33 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
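+/*
+ * A logical CPU owns the physical PMU only when its current MPIDR
+ * affinity bits match the MPIDR this PMU instance was bound to at
+ * probe time; cache the result in cpupmu->active.
+ */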
+static bool check_active(struct arm_cpu_pmu *cpupmu)
+{
+ u32 mpidr = read_mpidr();
+
+ BUG_ON(!(mpidr & 0x80000000)); /* this won't work on uniprocessor */
+
+ cpupmu->active = ((mpidr ^ cpupmu->mpidr) & 0xFFFFFF) == 0;
+ return cpupmu->active;
+}
+
static void armv7pmu_reset(void *info)
{
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
+ struct arm_pmu *pmu = (struct arm_pmu *)info;
+ struct arm_cpu_pmu *cpupmu = to_this_cpu_pmu(pmu);
+ u32 idx, nb_cnt = pmu->num_events;
+
+ if (!check_active(cpupmu))
+ return;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv7_pmnc_disable_counter(idx);
- armv7_pmnc_disable_intens(idx);
+ armv7_pmnc_disable_counter(cpupmu, idx);
+ armv7_pmnc_disable_intens(cpupmu, idx);
}
/* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+ armv7_pmnc_write(cpupmu, ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
@@ -1212,8 +1461,13 @@ static int armv7_a7_map_event(struct perf_event *event)
&armv7_a7_perf_cache_map, 0xFF);
}
+static void armv7pmu_cpu_init(struct arm_pmu *pmu,
+ struct arm_cpu_pmu *cpupmu);
+
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
+ struct arm_cpu_pmu *cpu_pmus = cpu_pmu->cpu_pmus;
+
cpu_pmu->handle_irq = armv7pmu_handle_irq;
cpu_pmu->enable = armv7pmu_enable_event;
cpu_pmu->disable = armv7pmu_disable_event;
@@ -1223,7 +1477,12 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
+ cpu_pmu->cpu_init = armv7pmu_cpu_init;
cpu_pmu->max_period = (1LLU << 32) - 1;
+
+ cpu_pmu->cpu_pmus = cpu_pmus;
};
static u32 armv7_read_num_pmnc_events(void)
@@ -1231,16 +1490,42 @@ static u32 armv7_read_num_pmnc_events(void)
u32 nb_cnt;
/* Read the nb of CNTx counters supported from PMNC */
- nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+ nb_cnt = (__v7_pmu_read_physical(PMCR) >> ARMV7_PMNC_N_SHIFT);
+ nb_cnt &= ARMV7_PMNC_N_MASK;
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
}
+static void armv7pmu_cpu_init(struct arm_pmu *pmu,
+ struct arm_cpu_pmu *cpupmu)
+{
+ size_t size = offsetof(struct armv7_pmu_logical_state, cntrs) +
+ pmu->num_events * sizeof(__v7_logical_state(cpupmu)->cntrs[0]);
+
+ cpupmu->logical_state = kzalloc(size, GFP_KERNEL);
+
+ /*
+ * We need a proper error return mechanism for these init functions.
+ * Until then, panicking the kernel is acceptable, since a failure
+ * here is indicative of crippling memory constraints which will
+ * likely make the system unusable anyway:
+ */
+ BUG_ON(!cpupmu->logical_state);
+
+ /*
+ * Save the "read-only" ID registers in logical_state.
+ * Because they are read-only, there are no direct accessors,
+ * so poke them directly into the logical_state structure:
+ */
+ __v7_logical_state(cpupmu)->PMCEID0 = __v7_pmu_read_physical(PMCEID0);
+ __v7_logical_state(cpupmu)->PMCEID1 = __v7_pmu_read_physical(PMCEID1);
+}
+
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A8";
+ cpu_pmu->name = "ARMv7_Cortex_A8";
cpu_pmu->map_event = armv7_a8_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1249,7 +1534,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A9";
+ cpu_pmu->name = "ARMv7_Cortex_A9";
cpu_pmu->map_event = armv7_a9_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1258,7 +1543,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A5";
+ cpu_pmu->name = "ARMv7_Cortex_A5";
cpu_pmu->map_event = armv7_a5_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1267,7 +1552,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A15";
+ cpu_pmu->name = "ARMv7_Cortex_A15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1277,7 +1562,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A7";
+ cpu_pmu->name = "ARMv7_Cortex_A7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 63990c42fac..cd670eafbb5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -225,7 +225,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -285,7 +285,7 @@ static void xscale1pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
switch (idx) {
@@ -321,7 +321,7 @@ static void xscale1pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
switch (idx) {
@@ -374,7 +374,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -386,7 +386,7 @@ static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -572,7 +572,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(cpu_pmu);
struct pt_regs *regs;
int idx;
@@ -626,7 +626,7 @@ static void xscale2pmu_enable_event(struct perf_event *event)
unsigned long flags, ien, evtsel;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -672,7 +672,7 @@ static void xscale2pmu_disable_event(struct perf_event *event)
unsigned long flags, ien, evtsel, of_flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -738,7 +738,7 @@ out:
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -750,7 +750,7 @@ static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events(cpu_pmu);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2..1180801468d 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/compiler.h>
#include <asm/errno.h>
@@ -26,6 +27,11 @@
struct psci_operations psci_ops;
+/* Type of psci support. Currently can only be enabled or disabled */
+#define PSCI_SUP_DISABLED 0
+#define PSCI_SUP_ENABLED 1
+
+static unsigned int psci;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
enum psci_function {
@@ -42,6 +48,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_EOPNOTSUPP -1
#define PSCI_RET_EINVAL -2
#define PSCI_RET_EPERM -3
+#define PSCI_RET_EALREADYON -4
static int psci_to_linux_errno(int errno)
{
@@ -54,6 +61,8 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
case PSCI_RET_EPERM:
return -EPERM;
+ case PSCI_RET_EALREADYON:
+ return -EAGAIN;
};
return -EINVAL;
@@ -164,6 +173,9 @@ static int __init psci_init(void)
const char *method;
u32 id;
+ if (psci == PSCI_SUP_DISABLED)
+ return 0;
+
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
return 0;
@@ -209,3 +221,33 @@ out_put_node:
return 0;
}
early_initcall(psci_init);
+
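+/*
+ * Report whether PSCI was enabled on the command line and a matching
+ * "psci" device-tree node is present.
+ */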
+int __init psci_probe(void)
+{
+ struct device_node *np = NULL;
+ int ret = -ENODEV;
+
+ if (psci == PSCI_SUP_ENABLED) {
+ np = of_find_matching_node(NULL, psci_of_match);
+ if (np)
+ ret = 0;
+ }
+
+ of_node_put(np);
+ return ret;
+}
+
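+/* Parse "psci={enable,disable}" from the kernel command line. */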
+static int __init early_psci(char *val)
+{
+ int ret = 0;
+
+ if (strcmp(val, "enable") == 0)
+ psci = PSCI_SUP_ENABLED;
+ else if (strcmp(val, "disable") == 0)
+ psci = PSCI_SUP_DISABLED;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+early_param("psci", early_psci);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 234e339196c..7c9fd36be1d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -260,6 +260,19 @@ static int cpu_has_aliasing_icache(unsigned int arch)
int aliasing_icache;
unsigned int id_reg, num_sets, line_size;
+#ifdef CONFIG_BIG_LITTLE
+ /*
+ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
+ * A7 = VIPT aliasing I-cache
+ * A15 = PIPT (non-aliasing) I-cache
+ * To cater for this discrepancy, let's assume aliasing I-cache
+ * all the time. This means unneeded extra work on the A15 but
+ * only ptrace is affected which is not performance critical.
+ */
+ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
+ return 1;
+#endif
+
/* PIPT caches never alias. */
if (icache_is_pipt())
return 0;
@@ -290,10 +303,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)
static void __init cacheid_init(void)
{
- unsigned int cachetype = read_cpuid_cachetype();
unsigned int arch = cpu_architecture();
if (arch >= CPU_ARCH_ARMv6) {
+ unsigned int cachetype = read_cpuid_cachetype();
if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
@@ -389,7 +402,7 @@ static void __init feat_v6_fixup(void)
*
* cpu_init sets up the per-CPU stacks.
*/
-void cpu_init(void)
+void notrace cpu_init(void)
{
unsigned int cpu = smp_processor_id();
struct stack *stk = &stacks[cpu];
@@ -787,7 +800,10 @@ void __init setup_arch(char **cmdline_p)
arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
if (is_smp()) {
- smp_set_ops(mdesc->smp);
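+ /*
+ * Let the machine descriptor's smp_init() hook claim the
+ * platform first; fall back to the static smp_ops only when
+ * there is no hook or it declines (returns false).
+ */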
+ if (!mdesc->smp_init || !mdesc->smp_init()) {
+ if (mdesc->smp)
+ smp_set_ops(mdesc->smp);
+ }
smp_init_cpus();
}
#endif
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415..b5c1e636ed8 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -82,10 +82,13 @@ ENDPROC(cpu_resume_after_mmu)
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
+ mov r1, #0 @ fall-back logical index for UP
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+ bic r0, #0xff000000
+ bl cpu_logical_index @ return logical index in r1
+1:
adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
- ALT_UP(mov r1, #0)
- and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +105,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
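+@ Translate the physical CPU id in r0 into a logical CPU index in r1
+@ by scanning __cpu_logical_map (runs with the MMU off, so the map
+@ address is computed PC-relative). Clobbers r2 and r3.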
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef366e34bdb..e9905a57b89 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -66,6 +66,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_COMPLETION,
IPI_CPU_BACKTRACE,
};
@@ -430,6 +431,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_COMPLETION, "completion interrupts"),
S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
@@ -556,6 +558,19 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
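+/*
+ * Register a completion to be fired by the IPI_COMPLETION handler on
+ * @cpu; returns the IPI number the caller should raise at that CPU.
+ */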
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);
@@ -658,6 +673,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_COMPLETION:
+ irq_enter();
+ ipi_complete(cpu);
+ irq_exit();
+ break;
+
case IPI_CPU_BACKTRACE:
ipi_cpu_backtrace(cpu, regs);
break;
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 45eac87ed66..5bc1a63284e 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
- if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
scu_ctrl = __raw_readl(scu_base + 0x30);
if (!(scu_ctrl & 1))
__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 79282ebcd93..8df74d630ca 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
@@ -22,6 +23,7 @@
#include <linux/slab.h>
#include <asm/cputype.h>
+#include <asm/smp_plat.h>
#include <asm/topology.h>
/*
@@ -200,6 +202,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
* cpu topology table
*/
struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
@@ -287,6 +290,137 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+ "arm,cortex-a7",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ const u32 *mpidr;
+ int len;
+
+ mpidr = of_get_property(cn, "reg", &len);
+ if (!mpidr || len != 4) {
+ pr_err("* %s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ cpu = get_logical_index(be32_to_cpup(mpidr));
+ if (cpu == -EINVAL) {
+ pr_err("couldn't get logical index for mpidr %x\n",
+ be32_to_cpup(mpidr));
+ break;
+ }
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+ * We didn't find both big and little cores so let's call all cores
+ * fast as this will keep the system running, with all cores being
+ * treated equal.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct cpumask hmp_slow_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+ if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+ domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+ domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu)
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index fc96ce6f235..8dc5e76cb78 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o mmio.o psci.o
obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c1fe498983a..a0dfc2a53f9 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
-#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
@@ -44,14 +42,13 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
-#include <asm/opcodes.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;
/* Per-CPU variable containing the currently running vcpu. */
@@ -304,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return 0;
}
-int __attribute_const__ kvm_target_cpu(void)
-{
- unsigned long implementor = read_cpuid_implementor();
- unsigned long part_number = read_cpuid_part_number();
-
- if (implementor != ARM_CPU_IMP_ARM)
- return -EINVAL;
-
- switch (part_number) {
- case ARM_CPU_PART_CORTEX_A15:
- return KVM_ARM_TARGET_CORTEX_A15;
- default:
- return -EINVAL;
- }
-}
-
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
int ret;
@@ -482,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
spin_unlock(&kvm_vmid_lock);
}
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* SVC called from Hyp mode should never get here */
- kvm_debug("SVC called from Hyp mode shouldn't go here\n");
- BUG();
- return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
- vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
- if (kvm_psci_call(vcpu))
- return 1;
-
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- if (kvm_psci_call(vcpu))
- return 1;
-
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* The hypervisor should never cause aborts */
- kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
- vcpu->arch.hxfar, vcpu->arch.hsr);
- return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* This is either an error in the ws. code or an external abort */
- kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
- vcpu->arch.hxfar, vcpu->arch.hsr);
- return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
- [HSR_EC_WFI] = kvm_handle_wfi,
- [HSR_EC_CP15_32] = kvm_handle_cp15_32,
- [HSR_EC_CP15_64] = kvm_handle_cp15_64,
- [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
- [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [HSR_EC_CP14_64] = kvm_handle_cp14_access,
- [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
- [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
- [HSR_EC_SVC_HYP] = handle_svc_hyp,
- [HSR_EC_HVC] = handle_hvc,
- [HSR_EC_SMC] = handle_smc,
- [HSR_EC_IABT] = kvm_handle_guest_abort,
- [HSR_EC_IABT_HYP] = handle_pabt_hyp,
- [HSR_EC_DABT] = kvm_handle_guest_abort,
- [HSR_EC_DABT_HYP] = handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr, cond, insn;
-
- /*
- * Exception Code 0 can only happen if we set HCR.TGE to 1, to
- * catch undefined instructions, and then we won't get past
- * the arm_exit_handlers test anyway.
- */
- BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
- /* Top two bits non-zero? Unconditional. */
- if (vcpu->arch.hsr >> 30)
- return true;
-
- cpsr = *vcpu_cpsr(vcpu);
-
- /* Is condition field valid? */
- if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
- cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
- else {
- /* This can happen in Thumb mode: examine IT state. */
- unsigned long it;
-
- it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
- /* it == 0 => unconditional. */
- if (it == 0)
- return true;
-
- /* The cond for this insn works out as the top 4 bits. */
- cond = (it >> 4);
- }
-
- /* Shift makes it look like an ARM-mode instruction */
- insn = cond << 28;
- return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index)
-{
- unsigned long hsr_ec;
-
- switch (exception_index) {
- case ARM_EXCEPTION_IRQ:
- return 1;
- case ARM_EXCEPTION_UNDEFINED:
- kvm_err("Undefined exception in Hyp mode at: %#08x\n",
- vcpu->arch.hyp_pc);
- BUG();
- panic("KVM: Hypervisor undefined exception!\n");
- case ARM_EXCEPTION_DATA_ABORT:
- case ARM_EXCEPTION_PREF_ABORT:
- case ARM_EXCEPTION_HVC:
- hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
- || !arm_exit_handlers[hsr_ec]) {
- kvm_err("Unkown exception class: %#08lx, "
- "hsr: %#08x\n", hsr_ec,
- (unsigned int)vcpu->arch.hsr);
- BUG();
- }
-
- /*
- * See ARM ARM B1.14.1: "Hyp traps on instructions
- * that fail their condition code check"
- */
- if (!kvm_condition_valid(vcpu)) {
- bool is_wide = vcpu->arch.hsr & HSR_IL;
- kvm_skip_instr(vcpu, is_wide);
- return 1;
- }
-
- return arm_exit_handlers[hsr_ec](vcpu, run);
- default:
- kvm_pr_unimpl("Unsupported exception type: %d",
- exception_index);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return 0;
- }
-}
-
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
if (likely(vcpu->arch.has_run_once))
@@ -973,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
static void cpu_init_hyp_mode(void *vector)
{
unsigned long long pgd_ptr;
- unsigned long pgd_low, pgd_high;
unsigned long hyp_stack_ptr;
unsigned long stack_page;
unsigned long vector_ptr;
@@ -982,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
__hyp_set_vectors((unsigned long)vector);
pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
- pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
- pgd_high = (pgd_ptr >> 32ULL);
stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
- /*
- * Call initialization code, and switch to the full blown
- * HYP code. The init code doesn't need to preserve these registers as
- * r1-r3 and r12 are already callee save according to the AAPCS.
- * Note that we slightly misuse the prototype by casing the pgd_low to
- * a void *.
- */
- kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+ __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
}
/**
@@ -1078,7 +892,7 @@ static int init_hyp_mode(void)
/*
* Map the host VFP structures
*/
- kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+ kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
if (!kvm_host_vfp_state) {
err = -ENOMEM;
kvm_err("Cannot allocate host VFP state\n");
@@ -1086,7 +900,7 @@ static int init_hyp_mode(void)
}
for_each_possible_cpu(cpu) {
- struct vfp_hard_struct *vfp;
+ kvm_kernel_vfp_t *vfp;
vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
err = create_hyp_mappings(vfp, vfp + 1);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7bed7556077..8eea97be1ed 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
- u32 val;
+ unsigned long val;
int cpu;
if (!p->is_write)
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
- kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
/* If access function fails, it should complain. */
} else {
- kvm_err("Unsupported guest CP15 access at: %08x\n",
+ kvm_err("Unsupported guest CP15 access at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
}
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params;
- params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
- params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
- params.is_write = ((vcpu->arch.hsr & 1) == 0);
+ params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+ params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+ params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
params.is_64bit = true;
- params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+ params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
params.Op2 = 0;
- params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+ params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
params.CRn = 0;
return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct coproc_params params;
- params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
- params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
- params.is_write = ((vcpu->arch.hsr & 1) == 0);
+ params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+ params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+ params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
params.is_64bit = false;
- params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
- params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
- params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+ params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+ params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+ params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
params.Rt2 = 0;
return emulate_cp15(vcpu, &params);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 992adfafa2f..b7301d3e479 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
const struct coproc_params *params)
{
- kvm_debug("CP15 write to read-only register at: %08x\n",
+ kvm_debug("CP15 write to read-only register at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
return false;
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
const struct coproc_params *params)
{
- kvm_debug("CP15 read to write-only register at: %08x\n",
+ kvm_debug("CP15 read to write-only register at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
return false;
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d61450ac666..bdede9e7da5 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
* Return a pointer to the register number valid in the current mode of
* the virtual CPU.
*/
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
- u32 *reg_array = (u32 *)&vcpu->arch.regs;
- u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+ unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+ unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case USR_MODE...SVC_MODE:
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
- u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+ unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case SVC_MODE:
return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
}
}
-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu: the vcpu pointer
- * @run: the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed. So let's re-implement the hardware, in
+ * software!
*/
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
{
- trace_kvm_wfi(*vcpu_pc(vcpu));
- kvm_vcpu_block(vcpu);
- return 1;
+ unsigned long cpsr, cond, insn;
+
+ /*
+ * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+ * catch undefined instructions, and then we won't get past
+ * the arm_exit_handlers test anyway.
+ */
+ BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+ /* Top two bits non-zero? Unconditional. */
+ if (kvm_vcpu_get_hsr(vcpu) >> 30)
+ return true;
+
+ cpsr = *vcpu_cpsr(vcpu);
+
+ /* Is condition field valid? */
+ if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+ cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+ else {
+ /* This can happen in Thumb mode: examine IT state. */
+ unsigned long it;
+
+ it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+ /* it == 0 => unconditional. */
+ if (it == 0)
+ return true;
+
+ /* The cond for this insn works out as the top 4 bits. */
+ cond = (it >> 4);
+ }
+
+ /* Shift makes it look like an ARM-mode instruction */
+ insn = cond << 28;
+ return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
}
/**
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
*/
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
- u32 new_lr_value;
- u32 new_spsr_value;
- u32 cpsr = *vcpu_cpsr(vcpu);
+ unsigned long new_lr_value;
+ unsigned long new_spsr_value;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset = 4;
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- u32 new_lr_value;
- u32 new_spsr_value;
- u32 cpsr = *vcpu_cpsr(vcpu);
+ unsigned long new_lr_value;
+ unsigned long new_spsr_value;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 2339d9609d3..152d0361218 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+int __attribute_const__ kvm_target_cpu(void)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_number = read_cpuid_part_number();
+
+ if (implementor != ARM_CPU_IMP_ARM)
+ return -EINVAL;
+
+ switch (part_number) {
+ case ARM_CPU_PART_CORTEX_A15:
+ return KVM_ARM_TARGET_CORTEX_A15;
+ default:
+ return -EINVAL;
+ }
+}
+
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644
index 00000000000..26ad17310a1
--- /dev/null
+++ b/arch/arm/kvm/handle_exit.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* SVC called from Hyp mode should never get here */
+ kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+ BUG();
+ return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+ kvm_vcpu_hvc_get_imm(vcpu));
+
+ if (kvm_psci_call(vcpu))
+ return 1;
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ if (kvm_psci_call(vcpu))
+ return 1;
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* The hypervisor should never cause aborts */
+ kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+ kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+ return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* This is either an error in the world-switch code or an external abort */
+ kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+ kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+ return -EFAULT;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu: the vcpu pointer
+ * @run: the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ trace_kvm_wfi(*vcpu_pc(vcpu));
+ kvm_vcpu_block(vcpu);
+ return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+ [HSR_EC_WFI] = kvm_handle_wfi,
+ [HSR_EC_CP15_32] = kvm_handle_cp15_32,
+ [HSR_EC_CP15_64] = kvm_handle_cp15_64,
+ [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+ [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [HSR_EC_CP14_64] = kvm_handle_cp14_access,
+ [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
+ [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
+ [HSR_EC_SVC_HYP] = handle_svc_hyp,
+ [HSR_EC_HVC] = handle_hvc,
+ [HSR_EC_SMC] = handle_smc,
+ [HSR_EC_IABT] = kvm_handle_guest_abort,
+ [HSR_EC_IABT_HYP] = handle_pabt_hyp,
+ [HSR_EC_DABT] = kvm_handle_guest_abort,
+ [HSR_EC_DABT_HYP] = handle_dabt_hyp,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+ if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+ !arm_exit_handlers[hsr_ec]) {
+ kvm_err("Unkown exception class: hsr: %#08x\n",
+ (unsigned int)kvm_vcpu_get_hsr(vcpu));
+ BUG();
+ }
+
+ return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ exit_handle_fn exit_handler;
+
+ switch (exception_index) {
+ case ARM_EXCEPTION_IRQ:
+ return 1;
+ case ARM_EXCEPTION_UNDEFINED:
+ kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+ kvm_vcpu_get_hyp_pc(vcpu));
+ BUG();
+ panic("KVM: Hypervisor undefined exception!\n");
+ case ARM_EXCEPTION_DATA_ABORT:
+ case ARM_EXCEPTION_PREF_ABORT:
+ case ARM_EXCEPTION_HVC:
+ /*
+ * See ARM ARM B1.14.1: "Hyp traps on instructions
+ * that fail their condition code check"
+ */
+ if (!kvm_condition_valid(vcpu)) {
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+ }
+
+ exit_handler = kvm_get_exit_handler(vcpu);
+
+ return exit_handler(vcpu, run);
+ default:
+ kvm_pr_unimpl("Unsupported exception type: %d",
+ exception_index);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return 0;
+ }
+}
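
The tri-state return convention documented above is what the arch run loop keys off; a simplified sketch of the caller side (the real loop lives in kvm_arch_vcpu_ioctl_run()):

    int ret = 1;

    while (ret > 0) {
        ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);  /* world switch */
        ret = handle_exit(vcpu, run, ret);         /* > 0: enter the guest again */
    }
    /* ret == 0: run->exit_reason tells userspace why; ret < 0: error. */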
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8ca87ab0919..f7793df62f5 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
/********************************************************************
* Flush per-VMID TLBs
*
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
*
* We rely on the hardware to broadcast the TLB invalidation to all CPUs
* inside the inner-shareable domain (which is the case for all v7
* implementations). If we come across a non-IS SMP implementation, we'll
* have to use an IPI based mechanism. Until then, we stick to the simple
* hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
*/
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
push {r2, r3}
add r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
pop {r2, r3}
bx lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
/********************************************************************
* Flush TLBs and instruction caches of all CPUs inside the inner-shareable
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
* instruction is issued since all traps are disabled when running the host
* kernel as per the Hyp-mode initialization at boot time.
*
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
* below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
* instructions are called from within Hyp-mode.
*
* Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 98a870ff1a5..72a12f2171b 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- __u32 *dest;
+ unsigned long *dest;
unsigned int len;
int mask;
if (!run->mmio.is_write) {
dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
- memset(dest, 0, sizeof(int));
+ *dest = 0;
len = run->mmio.len;
- if (len > 4)
+ if (len > sizeof(unsigned long))
return -EINVAL;
memcpy(dest, run->mmio.data, len);
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
*((u64 *)run->mmio.data));
- if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+ if (vcpu->arch.mmio_decode.sign_extend &&
+ len < sizeof(unsigned long)) {
mask = 1U << ((len * 8) - 1);
*dest = (*dest ^ mask) - mask;
}
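
The xor-and-subtract pair above is the classic branch-free sign extension: for an n-bit value v, (v ^ 2^(n-1)) - 2^(n-1) leaves non-negative values untouched and turns values with the sign bit set into v - 2^n. A worked one-byte example:

    unsigned long dest = 0xF0;          /* -16 as a signed byte */
    int mask = 1U << ((1 * 8) - 1);     /* 0x80, the sign bit */

    dest = (dest ^ mask) - mask;        /* 0x70 - 0x80 -> 0xFFFFFFF0 on 32-bit */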
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long rt, len;
bool is_write, sign_extend;
- if ((vcpu->arch.hsr >> 8) & 1) {
+ if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */
- kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
- if ((vcpu->arch.hsr >> 7) & 1) {
+ if (kvm_vcpu_dabt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
- kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
- switch ((vcpu->arch.hsr >> 22) & 0x3) {
- case 0:
- len = 1;
- break;
- case 1:
- len = 2;
- break;
- case 2:
- len = 4;
- break;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return -EFAULT;
- }
+ len = kvm_vcpu_dabt_get_as(vcpu);
+ if (unlikely(len < 0))
+ return len;
- is_write = vcpu->arch.hsr & HSR_WNR;
- sign_extend = vcpu->arch.hsr & HSR_SSE;
- rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ is_write = kvm_vcpu_dabt_iswrite(vcpu);
+ sign_extend = kvm_vcpu_dabt_issext(vcpu);
+ rt = kvm_vcpu_dabt_get_rd(vcpu);
if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
/* IO memory trying to read/write pc */
- kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+ kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
- kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
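
All of the new kvm_vcpu_dabt_* accessors read fixed ISS fields of the HSR; per the ARM ARM, a data abort encodes ISV at bit 24, SAS at [23:22], SSE at bit 21, SRT at [19:16], CM at bit 8, S1PTW at bit 7 and WnR at bit 6. The SAS decode that replaces the old switch is just a power of two, along these lines (a sketch, not the actual accessor):

    /* SAS (HSR[23:22]) is log2 of the access size; 0b11 is reserved. */
    static int hsr_access_size(u32 hsr)
    {
        unsigned int sas = (hsr >> 22) & 0x3;

        return sas == 3 ? -EFAULT : 1 << sas;   /* 1, 2 or 4 bytes */
    }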
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
* space do its magic.
*/
- if (vcpu->arch.hsr & HSR_ISV) {
+ if (kvm_vcpu_dabt_isvalid(vcpu)) {
ret = decode_hsr(vcpu, fault_ipa, &mmio);
if (ret)
return ret;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 99e07c7dd74..2f12e405640 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -20,7 +20,6 @@
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
-#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
@@ -28,8 +27,6 @@
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
-#include <asm/mach/map.h>
-#include <trace/events/kvm.h>
#include "trace.h"
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
- pte_val(*pte) = new_pte;
- /*
- * flush_pmd_entry just takes a void pointer and cleans the necessary
- * cache entries, so we can reuse the function for ptes.
- */
- flush_pmd_entry(pte);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
}
}
+static void free_hyp_pgd_entry(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+ pgd = hyp_pgd + pgd_index(hyp_addr);
+ pud = pud_offset(pgd, hyp_addr);
+
+ if (pud_none(*pud))
+ return;
+ BUG_ON(pud_bad(*pud));
+
+ pmd = pmd_offset(pud, hyp_addr);
+ free_ptes(pmd, addr);
+ pmd_free(NULL, pmd);
+ pud_clear(pud);
+}
+
/**
* free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
*
* Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ * either mappings in the kernel memory area (above PAGE_OFFSET), or
+ * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
*/
void free_hyp_pmds(void)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
unsigned long addr;
mutex_lock(&kvm_hyp_pgd_mutex);
- for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
- pgd = hyp_pgd + pgd_index(addr);
- pud = pud_offset(pgd, addr);
-
- if (pud_none(*pud))
- continue;
- BUG_ON(pud_bad(*pud));
-
- pmd = pmd_offset(pud, addr);
- free_ptes(pmd, addr);
- pmd_free(NULL, pmd);
- pud_clear(pud);
- }
+ for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+ free_hyp_pgd_entry(addr);
+ for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
+ free_hyp_pgd_entry(addr);
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
struct page *page;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- pte = pte_offset_kernel(pmd, addr);
+ unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+ pte = pte_offset_kernel(pmd, hyp_addr);
BUG_ON(!virt_addr_valid(addr));
page = virt_to_page(addr);
kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
unsigned long addr;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- pte = pte_offset_kernel(pmd, addr);
+ unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+ pte = pte_offset_kernel(pmd, hyp_addr);
BUG_ON(pfn_valid(*pfn_base));
kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
(*pfn_base)++;
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
unsigned long addr, next;
for (addr = start; addr < end; addr = next) {
- pmd = pmd_offset(pud, addr);
+ unsigned long hyp_addr = KERN_TO_HYP(addr);
+ pmd = pmd_offset(pud, hyp_addr);
BUG_ON(pmd_sect(*pmd));
if (pmd_none(*pmd)) {
- pte = pte_alloc_one_kernel(NULL, addr);
+ pte = pte_alloc_one_kernel(NULL, hyp_addr);
if (!pte) {
kvm_err("Cannot allocate Hyp pte\n");
return -ENOMEM;
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
unsigned long addr, next;
int err = 0;
- BUG_ON(start > end);
- if (start < PAGE_OFFSET)
+ if (start >= end)
+ return -EINVAL;
+ /* Check for a valid kernel memory mapping */
+ if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
+ return -EINVAL;
+ /* Check for a valid kernel IO mapping */
+ if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
return -EINVAL;
mutex_lock(&kvm_hyp_pgd_mutex);
for (addr = start; addr < end; addr = next) {
- pgd = hyp_pgd + pgd_index(addr);
- pud = pud_offset(pgd, addr);
+ unsigned long hyp_addr = KERN_TO_HYP(addr);
+ pgd = hyp_pgd + pgd_index(hyp_addr);
+ pud = pud_offset(pgd, hyp_addr);
if (pud_none_or_clear_bad(pud)) {
- pmd = pmd_alloc_one(NULL, addr);
+ pmd = pmd_alloc_one(NULL, hyp_addr);
if (!pmd) {
kvm_err("Cannot allocate Hyp pmd\n");
err = -ENOMEM;
@@ -236,12 +243,13 @@ out:
}
/**
- * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
* @to: The virtual kernel end address of the range (exclusive)
*
- * The same virtual address as the kernel virtual address is also used in
- * Hyp-mode mapping to the same underlying physical pages.
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
*
* Note: Wrapping around zero in the "to" address is not supported.
*/
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
}
/**
- * create_hyp_io_mappings - map a physical IO range in Hyp mode
- * @from: The virtual HYP start address of the range
- * @to: The virtual HYP end address of the range (exclusive)
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from: The kernel start VA of the range
+ * @to: The kernel end VA of the range (exclusive)
* @addr: The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
*/
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
- clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+ kvm_clean_pgd(pgd);
kvm->arch.pgd = pgd;
return 0;
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
return 0; /* ignore calls from kvm_set_spte_hva */
pmd = mmu_memory_cache_alloc(cache);
pud_populate(NULL, pud, pmd);
- pmd += pmd_index(addr);
get_page(virt_to_page(pud));
- } else
- pmd = pmd_offset(pud, addr);
+ }
+
+ pmd = pmd_offset(pud, addr);
/* Create 2nd stage page table mapping - Level 2 */
if (pmd_none(*pmd)) {
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
pte = mmu_memory_cache_alloc(cache);
- clean_pte_table(pte);
+ kvm_clean_pte(pte);
pmd_populate_kernel(NULL, pmd, pte);
- pte += pte_index(addr);
get_page(virt_to_page(pmd));
- } else
- pte = pte_offset_kernel(pmd, addr);
+ }
+
+ pte = pte_offset_kernel(pmd, addr);
if (iomap && pte_present(*pte))
return -EFAULT;
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
old_pte = *pte;
kvm_set_pte(pte, *new_pte);
if (pte_present(old_pte))
- kvm_tlb_flush_vmid(kvm);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
else
get_page(virt_to_page(pte));
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
pfn = __phys_to_pfn(pa);
for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+ pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+ kvm_set_s2pte_writable(&pte);
ret = mmu_topup_memory_cache(&cache, 2, 2);
if (ret)
@@ -492,29 +504,6 @@ out:
return ret;
}
-static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
-{
- /*
- * If we are going to insert an instruction page and the icache is
- * either VIPT or PIPT, there is a potential problem where the host
- * (or another VM) may have used the same page as this guest, and we
- * read incorrect data from the icache. If we're using a PIPT cache,
- * we can invalidate just that page, but if we are using a VIPT cache
- * we need to invalidate the entire icache - damn shame - as written
- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
- *
- * VIVT caches are tagged using both the ASID and the VMID and doesn't
- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
- */
- if (icache_is_pipt()) {
- unsigned long hva = gfn_to_hva(kvm, gfn);
- __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
- } else if (!icache_is_vivt_asid_tagged()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
- }
-}
-
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn, struct kvm_memory_slot *memslot,
unsigned long fault_status)
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long mmu_seq;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- write_fault = kvm_is_write_fault(vcpu->arch.hsr);
+ write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
if (fault_status == FSC_PERM && !write_fault) {
kvm_err("Unexpected L2 read permission error\n");
return -EFAULT;
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
if (writable) {
- pte_val(new_pte) |= L_PTE_S2_RDWR;
+ kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -585,7 +574,6 @@ out_unlock:
*/
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- unsigned long hsr_ec;
unsigned long fault_status;
phys_addr_t fault_ipa;
struct kvm_memory_slot *memslot;
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
gfn_t gfn;
int ret, idx;
- hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
- is_iabt = (hsr_ec == HSR_EC_IABT);
- fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
+ is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
- trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
- vcpu->arch.hxfar, fault_ipa);
+ trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+ kvm_vcpu_get_hfar(vcpu), fault_ipa);
/* Check the stage-2 fault is trans. fault or write fault */
- fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
+ fault_status = kvm_vcpu_trap_get_fault(vcpu);
if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
- kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
- hsr_ec, fault_status);
+ kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
+ kvm_vcpu_trap_get_class(vcpu), fault_status);
return -EFAULT;
}
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
if (is_iabt) {
/* Prefetch Abort on I/O address */
- kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+ kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
ret = 1;
goto out_unlock;
}
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
goto out_unlock;
}
- /* Adjust page offset */
- fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
+ /*
+ * The IPA is reported as [MAX:12], so we need to
+ * complement it with the bottom 12 bits from the
+ * faulting VA. This is always 12 bits, irrespective
+ * of the page size.
+ */
+ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
ret = io_mem_abort(vcpu, run, fault_ipa);
goto out_unlock;
}
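
Concretely, HPFAR holds IPA[39:12] in its upper bits (hence the << 8 when the fault IPA is first computed above), and the HFAR supplies the in-page offset. With made-up register values:

    fault_ipa = ((phys_addr_t)0x00012340 & HPFAR_MASK) << 8;  /* 0x01234000 */
    fault_ipa |= 0xc0defabc & ((1 << 12) - 1);                /* -> 0x01234abc */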
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
- kvm_tlb_flush_vmid(kvm);
+ kvm_tlb_flush_vmid_ipa(kvm, gpa);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
pmd = pmd_offset(pud, addr);
pud_clear(pud);
- clean_pmd_entry(pmd);
+ kvm_clean_pmd_entry(pmd);
pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
} while (pgd++, addr = next, addr < end);
}
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 0e4cfe123b3..17c5ac7d10e 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
if (addr & ~KVM_PHYS_MASK)
return -E2BIG;
- if (addr & ~PAGE_MASK)
+ if (addr & (SZ_4K - 1))
return -EINVAL;
mutex_lock(&kvm->lock);
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 2acdff4c1df..180b3024bec 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
static struct clock_event_device clkevt = {
.name = "at91_tick",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
.rating = 150,
.set_next_event = clkevt32k_next_event,
.set_mode = clkevt32k_mode,
@@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void)
at91_st_write(AT91_ST_RTMR, 1);
/* Setup timer clockevent, with minimum of two ticks (important!!) */
+ clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+ clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+ clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
clkevt.cpumask = cpumask_of(0);
- clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
- 2, AT91_ST_ALMV);
+ clockevents_register_device(&clkevt);
/* register clocksource */
clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
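
For reference, the open-coded setup reproduces what clockevents_config_and_register() computes internally: with the 32.768 kHz slow clock and shift = 32,

    mult = div_sc(32768, NSEC_PER_SEC, 32)   /* ~ (32768 << 32) / 10^9 ~ 140737 */

so clockevent_delta2ns(2, &clkevt) is roughly (2 << 32) / 140737 ~ 61035 ns, i.e. exactly the two slow-clock ticks (2 / 32768 s ~ 61 us) that the "minimum of two ticks" comment insists on.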
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index c5d7e1e9d75..a5afcf76550 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -22,10 +22,9 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
-#include <asm/mach/irq.h>
-
#include <mach/hardware.h>
#include <mach/at91_pio.h>
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 4b678478cf9..6b4608d58da 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -333,7 +333,7 @@ static void at91_dt_rstc(void)
of_id = of_match_node(rstc_ids, np);
if (!of_id)
- panic("AT91: rtsc no restart function availlable\n");
+ panic("AT91: rtsc no restart function available\n");
arm_pm_restart = of_id->data;
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index d63d399c7ba..7bc0f9aa8b3 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -26,6 +26,7 @@
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/irqchip/chained_irq.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 3f30aa1ae35..57344b7e98c 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -344,6 +344,7 @@
#define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
#define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
#define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
+#define EXYNOS5_ARM_L2_OPTION S5P_PMUREG(0x2608)
#define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
#define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
#define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 60f7c5be057..95e04bd5813 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -20,7 +20,6 @@
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
@@ -76,13 +75,6 @@ static DEFINE_SPINLOCK(boot_lock);
static void __cpuinit exynos_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index daebc1abc96..97d68852625 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -228,6 +228,7 @@ static struct exynos_pmu_conf exynos5250_pmu_config[] = {
{ EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
{ EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
{ EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } },
{ EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
{ EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
{ EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
@@ -353,11 +354,9 @@ static void exynos5_init_pmu(void)
/*
* SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
- * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
*/
tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
- tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
- EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+ tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
__raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
/*
diff --git a/arch/arm/mach-highbank/platsmp.c b/arch/arm/mach-highbank/platsmp.c
index 8797a700172..a984573e0d0 100644
--- a/arch/arm/mach-highbank/platsmp.c
+++ b/arch/arm/mach-highbank/platsmp.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/smp_scu.h>
@@ -25,11 +24,6 @@
extern void secondary_startup(void);
-static void __cpuinit highbank_secondary_init(unsigned int cpu)
-{
- gic_secondary_init(0);
-}
-
static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
highbank_set_cpu_jump(cpu, secondary_startup);
@@ -67,7 +61,6 @@ static void __init highbank_smp_prepare_cpus(unsigned int max_cpus)
struct smp_operations highbank_smp_ops __initdata = {
.smp_init_cpus = highbank_smp_init_cpus,
.smp_prepare_cpus = highbank_smp_prepare_cpus,
- .smp_secondary_init = highbank_secondary_init,
.smp_boot_secondary = highbank_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = highbank_cpu_die,
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index 7c0b03f67b0..77e9a25ed0f 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/smp.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/page.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>
@@ -52,16 +51,6 @@ void imx_scu_standby_enable(void)
writel_relaxed(val, scu_base);
}
-static void __cpuinit imx_secondary_init(unsigned int cpu)
-{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-}
-
static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
imx_set_cpu_jump(cpu, v7_secondary_startup);
@@ -96,7 +85,6 @@ static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
struct smp_operations imx_smp_ops __initdata = {
.smp_init_cpus = imx_smp_init_cpus,
.smp_prepare_cpus = imx_smp_prepare_cpus,
- .smp_secondary_init = imx_secondary_init,
.smp_boot_secondary = imx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = imx_cpu_die,
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c
index acb0187c7ee..4695d5f35fc 100644
--- a/arch/arm/mach-kirkwood/board-ts219.c
+++ b/arch/arm/mach-kirkwood/board-ts219.c
@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void)
pm_power_off = qnap_tsx1x_power_off;
}
-
-/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
-static int __init ts219_pci_init(void)
-{
- if (machine_is_ts219())
- kirkwood_pcie_init(KW_PCIE0);
-
- return 0;
-}
-subsys_initcall(ts219_pci_init);
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 283abff9022..e1267d6b468 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
static int __init ts219_pci_init(void)
{
if (machine_is_ts219())
- kirkwood_pcie_init(KW_PCIE0);
+ kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
return 0;
}
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 42932865416..00cdb0a5dac 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -15,7 +15,6 @@
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -42,13 +41,6 @@ static inline int get_core_count(void)
static void __cpuinit msm_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 3a077df6b8d..9bc9f191f9c 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -73,11 +73,11 @@
#define LIS302_IRQ1_GPIO 181
#define LIS302_IRQ2_GPIO 180 /* Not yet in use */
-/* list all spi devices here */
+/* List all SPI devices here. Note that the list/probe order seems to matter! */
enum {
RX51_SPI_WL1251,
- RX51_SPI_MIPID, /* LCD panel */
RX51_SPI_TSC2005, /* Touch Controller */
+ RX51_SPI_MIPID, /* LCD panel */
};
static struct wl12xx_platform_data wl1251_pdata;
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
index 476b82066cb..832772147f3 100644
--- a/arch/arm/mach-omap2/cclock33xx_data.c
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -446,9 +446,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
*/
DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
-DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
- AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
- 0x0, NULL);
+static struct clk clkdiv32k_ick;
+
+static const char *clkdiv32k_ick_parent_names[] = {
+ "clkdiv32k_ck",
+};
+
+static const struct clk_ops clkdiv32k_ick_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap clkdiv32k_ick_hw = {
+ .hw = {
+ .clk = &clkdiv32k_ick,
+ },
+ .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ .clkdm_name = "clk_24mhz_clkdm",
+};
+
+DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
/* "usbotg_fck" is an additional clock and not really a modulemode */
DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 80392fca86c..4f67a5b9bc5 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -274,8 +274,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
static struct cpuidle_driver omap3_idle_driver = {
- .name = "omap3_idle",
- .owner = THIS_MODULE,
+ .name = "omap3_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
.states = {
{
.enter = omap3_enter_idle_bm,
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 8a68f1ec66b..577298ed5a4 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -300,7 +300,7 @@ void __init omap3xxx_check_revision(void)
* If the processor type is Cortex-A8 and the revision is 0x0
* it means its Cortex r0p0 which is 3430 ES1.0.
*/
- cpuid = read_cpuid(CPUID_ID);
+ cpuid = read_cpuid_id();
if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
omap_revision = OMAP3430_REV_ES1_0;
cpu_rev = "1.0";
@@ -460,7 +460,7 @@ void __init omap4xxx_check_revision(void)
* Use ARM register to detect the correct ES version
*/
if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
- idcode = read_cpuid(CPUID_ID);
+ idcode = read_cpuid_id();
rev = (idcode & 0xf) - 1;
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index d9727218dd0..f0897732a96 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -67,13 +67,6 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
4, 0, 0, 0, 0, 0);
/*
- * If any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
@@ -209,7 +202,7 @@ static void __init omap4_smp_init_cpus(void)
unsigned int i = 0, ncores = 1, cpu_id;
/* Use ARM cpuid check here, as SoC detection will not work so early */
- cpu_id = read_cpuid(CPUID_ID) & CPU_MASK;
+ cpu_id = read_cpuid_id() & CPU_MASK;
if (cpu_id == CPU_CORTEX_A9) {
/*
* Currently we can't call ioremap here because
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index a202a478510..3a750de656c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2066,7 +2066,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
* do so is present in the hwmod data, then call it and pass along the
* return value; otherwise, return 0.
*/
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
{
if (!oh->class->enable_preprogram)
return 0;
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 4b788310f6a..c7c92e78f0c 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/smp_plat.h>
@@ -49,13 +48,6 @@ void __init sirfsoc_map_scu(void)
static void __cpuinit sirfsoc_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c
index d8ba9bee4c7..6b0b6047785 100644
--- a/arch/arm/mach-s3c24xx/irq.c
+++ b/arch/arm/mach-s3c24xx/irq.c
@@ -25,6 +25,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 953eb1f9388..384e27dd360 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/irqchip/arm-gic.h>
#include <mach/common.h>
#include <mach/emev2.h>
#include <asm/smp_plat.h>
@@ -85,11 +84,6 @@ static int __maybe_unused emev2_cpu_kill(unsigned int cpu)
}
-static void __cpuinit emev2_secondary_init(unsigned int cpu)
-{
- gic_secondary_init(0);
-}
-
static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
cpu = cpu_logical_map(cpu);
@@ -124,7 +118,6 @@ static void __init emev2_smp_init_cpus(void)
struct smp_operations emev2_smp_ops __initdata = {
.smp_init_cpus = emev2_smp_init_cpus,
.smp_prepare_cpus = emev2_smp_prepare_cpus,
- .smp_secondary_init = emev2_secondary_init,
.smp_boot_secondary = emev2_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = emev2_cpu_kill,
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 3a4acf23edc..994906560ed 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/irqchip/arm-gic.h>
#include <mach/common.h>
#include <mach/r8a7779.h>
#include <asm/smp_plat.h>
@@ -132,11 +131,6 @@ static int __maybe_unused r8a7779_cpu_kill(unsigned int cpu)
}
-static void __cpuinit r8a7779_secondary_init(unsigned int cpu)
-{
- gic_secondary_init(0);
-}
-
static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
struct r8a7779_pm_ch *ch = NULL;
@@ -186,7 +180,6 @@ static void __init r8a7779_smp_init_cpus(void)
struct smp_operations r8a7779_smp_ops __initdata = {
.smp_init_cpus = r8a7779_smp_init_cpus,
.smp_prepare_cpus = r8a7779_smp_prepare_cpus,
- .smp_secondary_init = r8a7779_secondary_init,
.smp_boot_secondary = r8a7779_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = r8a7779_cpu_kill,
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index acb46a94ccd..d0f9aca2247 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/irqchip/arm-gic.h>
#include <mach/common.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
@@ -59,11 +58,6 @@ static unsigned int __init sh73a0_get_core_count(void)
return scu_get_core_count(scu_base);
}
-static void __cpuinit sh73a0_secondary_init(unsigned int cpu)
-{
- gic_secondary_init(0);
-}
-
static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
cpu = cpu_logical_map(cpu);
@@ -138,7 +132,6 @@ static void sh73a0_cpu_die(unsigned int cpu)
struct smp_operations sh73a0_smp_ops __initdata = {
.smp_init_cpus = sh73a0_smp_init_cpus,
.smp_prepare_cpus = sh73a0_smp_prepare_cpus,
- .smp_secondary_init = sh73a0_secondary_init,
.smp_boot_secondary = sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = sh73a0_cpu_kill,
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index 84c60fa8daa..ca14d1d5ac7 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
@@ -33,16 +32,6 @@
extern void __iomem *sys_manager_base_addr;
extern void __iomem *rst_manager_base_addr;
-static void __cpuinit socfpga_secondary_init(unsigned int cpu)
-{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-}
-
static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
@@ -109,7 +98,6 @@ static void socfpga_cpu_die(unsigned int cpu)
struct smp_operations socfpga_smp_ops __initdata = {
.smp_init_cpus = socfpga_smp_init_cpus,
.smp_prepare_cpus = socfpga_smp_prepare_cpus,
- .smp_secondary_init = socfpga_secondary_init,
.smp_boot_secondary = socfpga_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = socfpga_cpu_die,
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
index af4ade61cd9..551c69c9a22 100644
--- a/arch/arm/mach-spear13xx/platsmp.c
+++ b/arch/arm/mach-spear13xx/platsmp.c
@@ -15,7 +15,6 @@
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/smp.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <mach/spear.h>
@@ -28,13 +27,6 @@ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 2c6b3d55213..9348d3c496a 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -18,7 +18,6 @@
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
#include <linux/clk/tegra.h>
#include <asm/cacheflush.h>
@@ -44,13 +43,6 @@ static cpumask_t tegra_cpu_init_mask;
static void __cpuinit tegra_secondary_init(unsigned int cpu)
{
- /*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
}
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 1e49d901f2c..0320495efc4 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -95,7 +95,7 @@
#define U300_SPI_BASE (U300_FAST_PER_PHYS_BASE+0x6000)
/* Fast UART1 on U335 only */
-#define U300_UART1_BASE (U300_SLOW_PER_PHYS_BASE+0x7000)
+#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000)
/*
* SLOW peripherals
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 18f7af339dc..152b1309b9a 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -16,7 +16,6 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
@@ -58,13 +57,6 @@ static DEFINE_SPINLOCK(boot_lock);
static void __cpuinit ux500_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 52d315b792c..7b2ebc29ce3 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,5 +1,6 @@
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
+ select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_GIC
@@ -52,4 +53,21 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
+config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+ depends on MCPM
+ select ARM_CCI
+ help
+ Support for the Dual Cluster System Configuration Block (DCSCB).
+ This is needed to provide CPU and cluster power management
+ on RTSM.
+
+config ARCH_VEXPRESS_TC2
+ bool "TC2 cluster management"
+ depends on MCPM
+ select ARM_SPC
+ select ARM_CCI
+ help
+ Support for CPU and cluster power management on TC2.
+
endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 80b64971fbd..b4117dbfe85 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -6,5 +6,13 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
obj-y := v2m.o reset.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+CFLAGS_REMOVE_dcscb.o = -pg
+obj-$(CONFIG_ARCH_VEXPRESS_TC2) += tc2_pm.o tc2_pm_setup.o
+CFLAGS_REMOVE_tc2_pm.o = -pg
+ifeq ($(CONFIG_ARCH_VEXPRESS_TC2),y)
+obj-$(CONFIG_ARM_PSCI) += tc2_pm_psci.o
+CFLAGS_REMOVE_tc2_pm_psci.o = -pg
+endif
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
index f134cd4a85f..bde4374ab6d 100644
--- a/arch/arm/mach-vexpress/core.h
+++ b/arch/arm/mach-vexpress/core.h
@@ -6,6 +6,8 @@
void vexpress_dt_smp_map_io(void);
+bool vexpress_smp_init_ops(void);
+
extern struct smp_operations vexpress_smp_ops;
extern void vexpress_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
new file mode 100644
index 00000000000..0dc3caca227
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -0,0 +1,256 @@
+/*
+ * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
+ *
+ * Created by: Nicolas Pitre, May 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/psci.h>
+
+
+#define RST_HOLD0 0x0
+#define RST_HOLD1 0x4
+#define SYS_SWRESET 0x8
+#define RST_STAT0 0xc
+#define RST_STAT1 0x10
+#define EAG_CFG_R 0x20
+#define EAG_CFG_W 0x24
+#define KFC_CFG_R 0x28
+#define KFC_CFG_W 0x2c
+#define DCS_CFG_R 0x30
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static void __iomem *dcscb_base;
+static int dcscb_use_count[4][2];
+static int dcscb_mcpm_cpu_mask[2];
+
+static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int rst_hold, cpumask = (1 << cpu);
+ unsigned int mcpm_mask = dcscb_mcpm_cpu_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cpu >= 4 || cluster >= 2)
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&dcscb_lock);
+
+ dcscb_use_count[cpu][cluster]++;
+ if (dcscb_use_count[cpu][cluster] == 1) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ if (rst_hold & (1 << 8)) {
+ /* remove cluster reset and add individual CPU's reset */
+ rst_hold &= ~(1 << 8);
+ rst_hold |= mcpm_mask;
+ }
+ rst_hold &= ~(cpumask | (cpumask << 4));
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually make itself down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&dcscb_lock);
+ local_irq_enable();
+
+ return 0;
+}
+
+static void dcscb_power_down(void)
+{
+ unsigned int mpidr, cpu, cluster, rst_hold, cpumask, mcpm_mask;
+ bool last_man = false, skip_wfi = false;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpumask = (1 << cpu);
+ mcpm_mask = dcscb_mcpm_cpu_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&dcscb_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ dcscb_use_count[cpu][cluster]--;
+ if (dcscb_use_count[cpu][cluster] == 0) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= cpumask;
+ if (((rst_hold | (rst_hold >> 4)) & mcpm_mask) == mcpm_mask) {
+ rst_hold |= (1 << 8);
+ last_man = true;
+ }
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else
+ BUG();
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&dcscb_lock);
+
+ /*
+ * Flush all cache levels for this cluster.
+ *
+ * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
+ * a preliminary flush here for those CPUs. At least, that's
+ * the theory -- without the extra flush, Linux explodes on
+ * RTSM (maybe not needed anymore, to be investigated).
+ */
+ flush_cache_all();
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_all();
+
+ /*
+ * This is a harmless no-op. On platforms with a real
+ * outer cache this might either be needed or not,
+ * depending on where the outer cache sits.
+ */
+ outer_flush_all();
+
+ /* Disable local coherency by clearing the ACTLR "SMP" bit: */
+ set_auxcr(get_auxcr() & ~(1 << 6));
+
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ disable_cci(cluster);
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ arch_spin_unlock(&dcscb_lock);
+
+ /*
+ * Flush the local CPU cache.
+ *
+ * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
+ * a preliminary flush here for those CPUs. At least, that's
+ * the theory -- without the extra flush, Linux explodes on
+ * RTSM (maybe not needed anymore, to be investigated).
+ */
+ flush_cache_louis();
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_louis();
+
+ /* Disable local coherency by clearing the ACTLR "SMP" bit: */
+ set_auxcr(get_auxcr() & ~(1 << 6));
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (!skip_wfi) {
+ dsb();
+ wfi();
+ }
+
+ /* Not dead at this point? Let our caller cope. */
+}
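
The last-man test is plain bit arithmetic on RST_HOLDx, where [3:0] and [7:4] are the two per-CPU reset fields (both cleared together at power-up above) and bit 8 holds the whole cluster in reset. A worked example for a four-CPU cluster (mcpm_mask == 0xf) with CPUs 0-2 already down:

    rst_hold = 0x07;                          /* CPUs 0-2 already held in reset */
    rst_hold |= (1 << 3);                     /* CPU 3 going down -> 0x0f */
    /* (0x0f | (0x0f >> 4)) & 0xf == 0xf: every CPU is down, so... */
    rst_hold |= (1 << 8);                     /* ...hold the whole cluster in reset */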
+
+static const struct mcpm_platform_ops dcscb_power_ops = {
+ .power_up = dcscb_power_up,
+ .power_down = dcscb_power_down,
+};
+
+static void __init dcscb_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+ dcscb_use_count[cpu][cluster] = 1;
+}
+
+extern void dcscb_power_up_setup(unsigned int affinity_level);
+
+static int __init dcscb_init(void)
+{
+ struct device_node *node;
+ unsigned int cfg;
+ int ret;
+
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
+ node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
+ if (!node)
+ return -ENODEV;
+ dcscb_base = of_iomap(node, 0);
+ if (!dcscb_base)
+ return -EADDRNOTAVAIL;
+ cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
+ dcscb_mcpm_cpu_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
+ dcscb_mcpm_cpu_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
+ dcscb_usage_count_init();
+
+ ret = mcpm_platform_register(&dcscb_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(dcscb_power_up_setup);
+ if (ret) {
+ iounmap(dcscb_base);
+ return ret;
+ }
+
+ /*
+ * Future entries into the kernel can now go
+ * through the cluster entry vectors.
+ */
+ vexpress_flags_set(virt_to_phys(mcpm_entry_point));
+
+ return 0;
+}
+
+early_initcall(dcscb_init);
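
The DCS_CFG_R decode above packs each cluster's CPU count into a nibble starting at bit 16 (cluster 0 at [19:16], cluster 1 at [23:20]). With a hypothetical cfg of 0x00340000 for a 4+3 model, the arithmetic works out to:

    mask0 = (1 << ((0x00340000 >> 16) & 0xf)) - 1;   /* 4 CPUs -> 0x0f */
    mask1 = (1 << ((0x00340000 >> 20) & 0xf)) - 1;   /* 3 CPUs -> 0x07 */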
diff --git a/arch/arm/mach-vexpress/dcscb_setup.S b/arch/arm/mach-vexpress/dcscb_setup.S
new file mode 100644
index 00000000000..93bd13f458a
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb_setup.S
@@ -0,0 +1,80 @@
+/*
+ * arch/arm/mach-vexpress/dcscb_setup.S
+ *
+ * Created by: Dave Martin, 2012-06-22
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+
+#define SLAVE_SNOOPCTL_OFFSET 0
+#define SNOOPCTL_SNOOP_ENABLE (1 << 0)
+#define SNOOPCTL_DVM_ENABLE (1 << 1)
+
+#define CCI_STATUS_OFFSET 0xc
+#define STATUS_CHANGE_PENDING (1 << 0)
+
+#define CCI_SLAVE_OFFSET(n) (0x1000 + 0x1000 * (n))
+
+#define RTSM_CCI_PHYS_BASE 0x2c090000
+#define RTSM_CCI_SLAVE_A15 3
+#define RTSM_CCI_SLAVE_A7 4
+
+#define RTSM_CCI_A15_OFFSET CCI_SLAVE_OFFSET(RTSM_CCI_SLAVE_A15)
+#define RTSM_CCI_A7_OFFSET CCI_SLAVE_OFFSET(RTSM_CCI_SLAVE_A7)
+
+
+ENTRY(dcscb_power_up_setup)
+
+ cmp r0, #0 @ check affinity level
+ beq 2f
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ */
+
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r0, r0, #8, #4 @ cluster
+
+ @ A15/A7 may not require explicit L2 invalidation on reset, dependent
+ @ on hardware integration decisions.
+ @ For now, this code assumes that L2 is either already invalidated, or
+ @ invalidation is not required.
+
+ ldr r3, =RTSM_CCI_PHYS_BASE + RTSM_CCI_A15_OFFSET
+ cmp r0, #0 @ A15 cluster?
+ addne r3, r3, #RTSM_CCI_A7_OFFSET - RTSM_CCI_A15_OFFSET
+
+ @ r3 now points to the correct CCI slave register block
+
+ ldr r0, [r3, #SLAVE_SNOOPCTL_OFFSET]
+ orr r0, r0, #SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE
+ str r0, [r3, #SLAVE_SNOOPCTL_OFFSET] @ enable CCI snoops
+
+ @ Wait for snoop control change to complete:
+
+ ldr r3, =RTSM_CCI_PHYS_BASE
+
+1: ldr r0, [r3, #CCI_STATUS_OFFSET]
+ tst r0, #STATUS_CHANGE_PENDING
+ bne 1b
+
+ dsb @ Synchronise side-effects of enabling CCI
+
+ bx lr
+
+2: @ Implementation-specific local CPU setup operations should go here,
+ @ if any. In this case, there is nothing to do.
+
+ bx lr
+
+ENDPROC(dcscb_power_up_setup)
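
This code runs with the MMU and caches off, which is why it stays in assembly; rendered as C purely for readability (a sketch only: cci and is_a15 are stand-ins, and no such C path exists in the patch):

    void __iomem *slave = cci + CCI_SLAVE_OFFSET(is_a15 ? RTSM_CCI_SLAVE_A15
                                                        : RTSM_CCI_SLAVE_A7);
    u32 ctl = readl_relaxed(slave + SLAVE_SNOOPCTL_OFFSET);

    writel_relaxed(ctl | SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE,
                   slave + SLAVE_SNOOPCTL_OFFSET);
    while (readl_relaxed(cci + CCI_STATUS_OFFSET) & STATUS_CHANGE_PENDING)
        ;   /* wait for the snoop control change to complete */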
diff --git a/arch/arm/mach-vexpress/include/mach/tc2.h b/arch/arm/mach-vexpress/include/mach/tc2.h
new file mode 100644
index 00000000000..d3b5a2225a0
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/tc2.h
@@ -0,0 +1,10 @@
+#ifndef __MACH_TC2_H
+#define __MACH_TC2_H
+
+/*
+ * cpu and cluster limits
+ */
+#define TC2_MAX_CPUS 3
+#define TC2_MAX_CLUSTERS 2
+
+#endif
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index dc1ace55d55..21368ba6ca2 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -12,9 +12,11 @@
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/vexpress.h>
+#include <asm/mcpm.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>
@@ -203,3 +205,14 @@ struct smp_operations __initdata vexpress_smp_ops = {
.cpu_die = vexpress_cpu_die,
#endif
};
+
+bool __init vexpress_smp_init_ops(void)
+{
+#ifdef CONFIG_MCPM
+ if (of_find_compatible_node(NULL, NULL, "arm,cci")) {
+ mcpm_smp_set_ops();
+ return true;
+ }
+#endif
+ return false;
+}
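
The smp_init hook's return value is what selects between backends: true means "I installed my own SMP ops, skip the defaults". A stand-alone sketch of that contract, with stub names in place of the of_* and MCPM calls:

#include <stdbool.h>
#include <string.h>

static bool dt_has_node(const char *compat)      /* stub for of_find_... */
{
	return strcmp(compat, "arm,cci") == 0;   /* pretend the node exists */
}

static void install_mcpm_ops(void) { }           /* stub for mcpm_smp_set_ops */

static bool model_smp_init(void)
{
	if (dt_has_node("arm,cci")) {
		install_mcpm_ops();
		return true;     /* caller keeps these ops */
	}
	return false;            /* caller falls back to the default ops */
}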
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
new file mode 100644
index 00000000000..f2e9959fb26
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -0,0 +1,271 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * Some portions of this file were originally written by Achin Gupta
+ * Copyright: (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/psci.h>
+
+#include <mach/motherboard.h>
+#include <mach/tc2.h>
+
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number, which trips
+ * lockdep debugging.
+ */
+static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];
+
+static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+{
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster))
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&tc2_pm_lock);
+
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster])
+ vexpress_spc_powerdown_enable(cluster, 0);
+
+ tc2_pm_use_count[cpu][cluster]++;
+ if (tc2_pm_use_count[cpu][cluster] == 1) {
+ vexpress_spc_write_bxaddr_reg(cluster, cpu,
+ virt_to_phys(mcpm_entry_point));
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+ } else if (tc2_pm_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually power itself down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&tc2_pm_lock);
+ local_irq_enable();
+
+ return 0;
+}
+
+static void tc2_pm_down(u64 residency)
+{
+ unsigned int mpidr, cpu, cluster;
+ bool last_man = false, skip_wfi = false;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&tc2_pm_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ tc2_pm_use_count[cpu][cluster]--;
+ if (tc2_pm_use_count[cpu][cluster] == 0) {
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster] &&
+ (!residency || residency > 5000)) {
+ vexpress_spc_powerdown_enable(cluster, 1);
+ vexpress_spc_set_global_wakeup_intr(1);
+ last_man = true;
+ }
+ } else if (tc2_pm_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else
+ BUG();
+
+ gic_cpu_if_down();
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&tc2_pm_lock);
+
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_all();
+ asm volatile ("clrex");
+ set_auxcr(get_auxcr() & ~(1 << 6));
+
+ disable_cci(cluster);
+
+ /*
+ * Ensure that both C & I bits are disabled in the SCTLR
+ * before disabling ACE snoops. This ensures that no
+ * coherency traffic will originate from this cpu after
+ * ACE snoops are turned off.
+ */
+ cpu_proc_fin();
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ /*
+ * If last man then undo any setup done previously.
+ */
+ if (last_man) {
+ vexpress_spc_powerdown_enable(cluster, 0);
+ vexpress_spc_set_global_wakeup_intr(0);
+ }
+
+ arch_spin_unlock(&tc2_pm_lock);
+
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_louis();
+ asm volatile ("clrex");
+ set_auxcr(get_auxcr() & ~(1 << 6));
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (!skip_wfi)
+ wfi();
+
+ /* Not dead at this point? Let our caller cope. */
+}
+
+static void tc2_pm_power_down(void)
+{
+ tc2_pm_down(0);
+}
+
+static void tc2_pm_suspend(u64 residency)
+{
+ extern void tc2_resume(void);
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ vexpress_spc_write_bxaddr_reg(cluster, cpu,
+ virt_to_phys(tc2_resume));
+
+ tc2_pm_down(residency);
+}
+
+static void tc2_pm_powered_up(void)
+{
+ unsigned int mpidr, cpu, cluster;
+ unsigned long flags;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ local_irq_save(flags);
+ arch_spin_lock(&tc2_pm_lock);
+
+ if (!tc2_pm_use_count[0][cluster] &&
+ !tc2_pm_use_count[1][cluster] &&
+ !tc2_pm_use_count[2][cluster]) {
+ vexpress_spc_powerdown_enable(cluster, 0);
+ vexpress_spc_set_global_wakeup_intr(0);
+ }
+
+ if (!tc2_pm_use_count[cpu][cluster])
+ tc2_pm_use_count[cpu][cluster] = 1;
+
+ vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
+ vexpress_spc_write_bxaddr_reg(cluster, cpu, 0);
+
+ arch_spin_unlock(&tc2_pm_lock);
+ local_irq_restore(flags);
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+ .power_up = tc2_pm_power_up,
+ .power_down = tc2_pm_power_down,
+ .suspend = tc2_pm_suspend,
+ .powered_up = tc2_pm_powered_up,
+};
+
+static void __init tc2_pm_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ tc2_pm_use_count[cpu][cluster] = 1;
+}
+
+extern void tc2_pm_power_up_setup(unsigned int affinity_level);
+
+static int __init tc2_pm_init(void)
+{
+ int ret;
+
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
+ if (!vexpress_spc_check_loaded())
+ return -ENODEV;
+
+ tc2_pm_usage_count_init();
+
+ ret = mcpm_platform_register(&tc2_pm_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(tc2_pm_power_up_setup);
+ if (!ret)
+ pr_info("TC2 power management initialized\n");
+ return ret;
+}
+
+early_initcall(tc2_pm_init);
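
The use count protocol above is worth restating: per CPU, 0 means down, 1 means up, and 2 means a power_up raced an in-progress power_down, in which case the down path must skip WFI and behave as if the wakeup already fired. A self-contained model of that state machine, assuming a single CPU and no firmware:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int use_count = 1;       /* CPU booted and running */

static void model_power_up(void)
{
	use_count++;
	assert(use_count == 1 || use_count == 2);   /* anything else is a bug */
}

static bool model_power_down(void)   /* true: WFI may be entered */
{
	use_count--;
	if (use_count == 0)
		return true;    /* really going down */
	if (use_count == 1)
		return false;   /* overtaken by a power_up: skip WFI */
	assert(0);
	return false;
}

int main(void)
{
	model_power_up();                               /* racing request: 1 -> 2 */
	printf("may wfi: %d\n", model_power_down());    /* 2 -> 1, prints 0 */
	printf("may wfi: %d\n", model_power_down());    /* 1 -> 0, prints 1 */
	return 0;
}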
diff --git a/arch/arm/mach-vexpress/tc2_pm_psci.c b/arch/arm/mach-vexpress/tc2_pm_psci.c
new file mode 100644
index 00000000000..5a5e4f56849
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm_psci.c
@@ -0,0 +1,168 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm_psci.c - TC2 PSCI support
+ *
+ * Created by: Achin Gupta, December 2012
+ * Copyright: (C) 2012 ARM Limited
+ *
+ * Some portions of this file were originally written by Nicolas Pitre
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/psci.h>
+#include <asm/atomic.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+
+#include <mach/motherboard.h>
+#include <mach/tc2.h>
+
+#include <linux/vexpress.h>
+
+/*
+ * Platform specific state id understood by the firmware and used to
+ * program the power controller
+ */
+#define PSCI_POWER_STATE_ID 0
+
+static atomic_t tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];
+
+static int tc2_pm_psci_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int mpidr = (cluster << 8) | cpu;
+ int ret = 0;
+
+ BUG_ON(!psci_ops.cpu_on);
+
+ switch (atomic_inc_return(&tc2_pm_use_count[cpu][cluster])) {
+ case 1:
+ /*
+ * This is a request to power up a CPU that Linux thinks has
+ * been powered down. Retries are needed while the firmware
+ * has not yet processed the earlier power down request.
+ */
+ do
+ ret = psci_ops.cpu_on(mpidr,
+ virt_to_phys(mcpm_entry_point));
+ while (ret == -EAGAIN);
+
+ return ret;
+ case 2:
+ /* This power up request has overtaken a power down request */
+ return ret;
+ default:
+ /* Any other value is a bug */
+ BUG();
+ }
+}
+
+static void tc2_pm_psci_power_down(void)
+{
+ struct psci_power_state power_state;
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ BUG_ON(!psci_ops.cpu_off);
+
+ switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
+ case 1:
+ /*
+ * Overtaken by a power up. Flush caches, exit coherency,
+ * return & fake a reset
+ */
+ set_cr(get_cr() & ~CR_C);
+
+ flush_cache_louis();
+
+ asm volatile ("clrex");
+ set_auxcr(get_auxcr() & ~(1 << 6));
+
+ return;
+ case 0:
+ /* A normal request to possibly power down the cluster */
+ power_state.id = PSCI_POWER_STATE_ID;
+ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
+ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
+
+ psci_ops.cpu_off(power_state);
+
+ /* On success this function never returns */
+ default:
+ /* Any other value is a bug */
+ BUG();
+ }
+}
+
+static void tc2_pm_psci_suspend(u64 unused)
+{
+ struct psci_power_state power_state;
+
+ BUG_ON(!psci_ops.cpu_suspend);
+
+ /* On TC2 always attempt to power down the cluster */
+ power_state.id = PSCI_POWER_STATE_ID;
+ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
+ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
+
+ psci_ops.cpu_suspend(power_state, virt_to_phys(mcpm_entry_point));
+
+ /* On success this function never returns */
+ BUG();
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+ .power_up = tc2_pm_psci_power_up,
+ .power_down = tc2_pm_psci_power_down,
+ .suspend = tc2_pm_psci_suspend,
+};
+
+static void __init tc2_pm_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+ atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
+}
+
+static int __init tc2_pm_psci_init(void)
+{
+ int ret;
+
+ ret = psci_probe();
+ if (ret) {
+ pr_debug("psci not found. Aborting psci init\n");
+ return -ENODEV;
+ }
+
+ tc2_pm_usage_count_init();
+
+ ret = mcpm_platform_register(&tc2_pm_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(NULL);
+ if (!ret)
+ pr_info("TC2 power management initialized\n");
+ return ret;
+}
+
+early_initcall(tc2_pm_psci_init);
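
The retry loop in tc2_pm_psci_power_up() exists because CPU_ON can race the firmware's handling of a previous CPU_OFF for the same core, which PSCI reports as -EAGAIN. A sketch of that loop with a stubbed firmware call (the stub fakes two busy replies; it is not the kernel's psci_ops interface):

#include <errno.h>

static int busy = 2;    /* pretend the power-down is still in flight */

static int stub_cpu_on(unsigned long mpidr, unsigned long entry)
{
	(void)mpidr; (void)entry;
	return busy-- > 0 ? -EAGAIN : 0;
}

static int power_up_with_retry(unsigned long mpidr, unsigned long entry)
{
	int ret;

	do
		ret = stub_cpu_on(mpidr, entry);
	while (ret == -EAGAIN);     /* firmware still completing CPU_OFF */

	return ret;
}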
diff --git a/arch/arm/mach-vexpress/tc2_pm_setup.S b/arch/arm/mach-vexpress/tc2_pm_setup.S
new file mode 100644
index 00000000000..4728f83731a
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm_setup.S
@@ -0,0 +1,102 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm_setup.S
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * (based on dcscb_setup.S by Dave Martin)
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+
+#define SPC_PHYS_BASE 0x7FFF0000
+#define SPC_WAKE_INT_STAT 0xb2c
+
+#define SNOOP_CTL_A15 0x404
+#define SNOOP_CTL_A7 0x504
+
+#define A15_SNOOP_MASK (0x3 << 7)
+#define A7_SNOOP_MASK (0x1 << 13)
+
+#define A15_BX_ADDR0 0xB68
+
+
+#define CCI_PHYS_BASE 0x2c090000
+
+#define SLAVE_SNOOPCTL_OFFSET 0
+#define SNOOPCTL_SNOOP_ENABLE (1 << 0)
+#define SNOOPCTL_DVM_ENABLE (1 << 1)
+
+#define CCI_STATUS_OFFSET 0xc
+#define STATUS_CHANGE_PENDING (1 << 0)
+
+#define CCI_SLAVE_OFFSET(n) (0x1000 + 0x1000 * (n))
+#define CCI_SLAVE_A15 3
+#define CCI_SLAVE_A7 4
+#define CCI_A15_OFFSET CCI_SLAVE_OFFSET(CCI_SLAVE_A15)
+#define CCI_A7_OFFSET CCI_SLAVE_OFFSET(CCI_SLAVE_A7)
+
+
+ENTRY(tc2_resume)
+ mrc p15, 0, r0, c0, c0, 5
+ ubfx r1, r0, #0, #4 @ r1 = cpu
+ ubfx r2, r0, #8, #4 @ r2 = cluster
+ add r1, r1, r2, lsl #2 @ r1 = index of CPU in WAKE_INT_STAT
+ ldr r3, =SPC_PHYS_BASE + SPC_WAKE_INT_STAT
+ ldr r3, [r3]
+ lsr r3, r1
+ tst r3, #1
+ wfieq @ if no pending IRQ, re-enter wfi
+ b mcpm_entry_point
+ENDPROC(tc2_resume)
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ */
+
+ENTRY(tc2_pm_power_up_setup)
+
+ cmp r0, #0
+ beq 2f
+
+ @ Enable CCI snoops
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r0, r0, #8, #4 @ cluster
+ ldr r3, =CCI_PHYS_BASE + CCI_A15_OFFSET
+ cmp r0, #0 @ A15 cluster?
+ addne r3, r3, #CCI_A7_OFFSET - CCI_A15_OFFSET
+
+ @ r3 now points to the correct CCI slave register block
+ ldr r0, [r3, #SLAVE_SNOOPCTL_OFFSET]
+ orr r0, r0, #SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE
+ str r0, [r3, #SLAVE_SNOOPCTL_OFFSET] @ enable CCI snoops
+
+ @ Wait for snoop control change to complete:
+ ldr r3, =CCI_PHYS_BASE
+1: ldr r0, [r3, #CCI_STATUS_OFFSET]
+ tst r0, #STATUS_CHANGE_PENDING
+ bne 1b
+
+ bx lr
+
+2: @ Clear the BX addr register
+ ldr r3, =SPC_PHYS_BASE + A15_BX_ADDR0
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r1, r0, #8, #4 @ cluster
+ ubfx r0, r0, #0, #4 @ cpu
+ add r3, r3, r1, lsl #4
+ mov r1, #0
+ str r1, [r3, r0, lsl #2]
+ dsb
+
+ bx lr
+
+ENDPROC(tc2_pm_power_up_setup)
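
The wake check in tc2_resume() packs one status bit per CPU into SPC_WAKE_INT_STAT at index cpu + cluster * 4; a clear bit means the wakeup was spurious and the core re-enters WFI. In C, under that assumed register layout:

#include <stdbool.h>
#include <stdint.h>

static bool wake_pending(uint32_t wake_int_stat,
			 unsigned int cpu, unsigned int cluster)
{
	unsigned int idx = cpu + cluster * 4;  /* matches "add r1, r1, r2, lsl #2" */

	return (wake_int_stat >> idx) & 1;     /* 0: spurious, go back to WFI */
}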
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 915683cb67d..772b7a179dd 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
+#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
@@ -377,6 +378,31 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
.restart = vexpress_restart,
MACHINE_END
+static void __init v2m_dt_hdlcd_init(void)
+{
+ struct device_node *node;
+ int len, na, ns;
+ const __be32 *prop;
+ phys_addr_t fb_base, fb_size;
+
+ node = of_find_compatible_node(NULL, NULL, "arm,hdlcd");
+ if (!node)
+ return;
+
+ na = of_n_addr_cells(node);
+ ns = of_n_size_cells(node);
+
+ prop = of_get_property(node, "framebuffer", &len);
+ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
+ return;
+
+ fb_base = of_read_number(prop, na);
+ fb_size = of_read_number(prop + na, ns);
+
+ if (WARN_ON(memblock_remove(fb_base, fb_size)))
+ return;
+}
+
static struct map_desc v2m_rs1_io_desc __initdata = {
.virtual = V2M_PERIPH,
.pfn = __phys_to_pfn(0x1c000000),
@@ -427,6 +453,8 @@ void __init v2m_dt_init_early(void)
pr_warning("vexpress: DT HBI (%x) is not matching "
"hardware (%x)!\n", dt_hbi, hbi);
}
+
+ v2m_dt_hdlcd_init();
}
static void __init v2m_dt_timer_init(void)
@@ -476,6 +504,7 @@ static const char * const v2m_dt_match[] __initconst = {
DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
.dt_compat = v2m_dt_match,
.smp = smp_ops(vexpress_smp_ops),
+ .smp_init = smp_init_ops(vexpress_smp_init_ops),
.map_io = v2m_dt_map_io,
.init_early = v2m_dt_init_early,
.init_irq = irqchip_init,
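
v2m_dt_hdlcd_init() decodes the "framebuffer" property as (#address-cells + #size-cells) big-endian 32-bit words and then carves the range out of memblock. The decoding matches what of_read_number() does; a stand-alone equivalent for reference, assuming a little-endian host (not the of_* API):

#include <stdint.h>

static uint64_t read_dt_number(const uint32_t *be_cells, int ncells)
{
	uint64_t v = 0;

	while (ncells--)                  /* big-endian, most significant first */
		v = (v << 32) | __builtin_bswap32(*be_cells++);
	return v;
}

/* usage: base = read_dt_number(prop, na); size = read_dt_number(prop + na, ns); */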
diff --git a/arch/arm/mach-virt/platsmp.c b/arch/arm/mach-virt/platsmp.c
index 8badaabe70a..f4143f5bfa5 100644
--- a/arch/arm/mach-virt/platsmp.c
+++ b/arch/arm/mach-virt/platsmp.c
@@ -21,8 +21,6 @@
#include <linux/smp.h>
#include <linux/of.h>
-#include <linux/irqchip/arm-gic.h>
-
#include <asm/psci.h>
#include <asm/smp_plat.h>
@@ -45,14 +43,8 @@ static int __cpuinit virt_boot_secondary(unsigned int cpu,
return -ENODEV;
}
-static void __cpuinit virt_secondary_init(unsigned int cpu)
-{
- gic_secondary_init(0);
-}
-
struct smp_operations __initdata virt_smp_ops = {
.smp_init_cpus = virt_smp_init_cpus,
.smp_prepare_cpus = virt_smp_prepare_cpus,
- .smp_secondary_init = virt_secondary_init,
.smp_boot_secondary = virt_boot_secondary,
};
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4045c4931a3..35955b54944 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -397,6 +397,13 @@ config CPU_V7
select CPU_PABRT_V7
select CPU_TLB_V7 if MMU
+config CPU_THUMBONLY
+ bool
+ # There are no CPUs available with MMU that don't implement an ARM ISA:
+ depends on !MMU
+ help
+ Select this if your CPU doesn't support the 32-bit ARM instruction set.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
@@ -605,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
bool
config ARM_THUMB
- bool "Support Thumb user binaries"
+ bool "Support Thumb user binaries" if !CPU_THUMBONLY
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
default y
help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index db26e2e543f..6f4585b8907 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -961,12 +961,14 @@ static int __init alignment_init(void)
return -ENOMEM;
#endif
+#ifdef CONFIG_CPU_CP15
if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = safe_usermode(ai_usermode, false);
}
+#endif
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f954f..e207aa5f846 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -446,8 +446,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (pud_none(*pud_k))
goto bad_area;
- if (!pud_present(*pud))
+ if (!pud_present(*pud)) {
set_pud(pud, *pud_k);
+ /*
+ * There is a small window during free_pgtables() where the
+ * user *pud entry is 0 but the TLB has not been invalidated
+ * and we get a level 2 (pmd) translation fault caused by the
+ * intermediate TLB caching of the old level 1 (pud) entry.
+ */
+ flush_tlb_kernel_page(addr);
+ }
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
@@ -470,8 +478,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
+ if (!pmd_present(pmd[index]))
+ copy_pmd(pmd, pmd_k);
- copy_pmd(pmd, pmd_k);
return 0;
bad_area:
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a3da603504d..76a30333ec6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -113,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {
}
};
+#ifdef CONFIG_CPU_CP15
/*
* These are useful for identifying cache coherency
* problems by allowing the cache or the cache and
@@ -211,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
}
#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+ pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+ pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 78f520bc0e9..2f76880060d 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,9 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3
+ tst r3, #0xf << 20 @ check the coherent walk bits
+ mcreq p15, 0, r0, c7, c10, 1 @ clean D-cache to PoU
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 6ffd78c0f9a..2098e026632 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,9 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3
+ tst r3, #0xf << 20 @ check the coherent walk bits
+ mcreq p15, 0, r0, c7, c10, 1 @ clean D-cache to PoU
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
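
Both hunks gate the PTE cache clean on ID_MMFR3[23:20], the "coherent walk" field: when it is zero the table walker does not snoop the D-cache, so the line must still be cleaned to the point of unification. The equivalent check in C, as an ARM32 kernel-context sketch (not portable):

static inline unsigned int read_id_mmfr3(void)
{
	unsigned int val;

	asm volatile("mrc p15, 0, %0, c0, c1, 7" : "=r" (val));
	return val;
}

static inline int pte_clean_needed(void)
{
	return ((read_id_mmfr3() >> 20) & 0xf) == 0;   /* no coherent walk */
}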
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 2d4b6414609..7bb961918c8 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -373,7 +373,7 @@ static struct resource orion_ge10_shared_resources[] = {
static struct platform_device orion_ge10_shared = {
.name = MV643XX_ETH_SHARED_NAME,
- .id = 1,
+ .id = 2,
.dev = {
.platform_data = &orion_ge10_shared_data,
},
@@ -388,8 +388,8 @@ static struct resource orion_ge10_resources[] = {
static struct platform_device orion_ge10 = {
.name = MV643XX_ETH_NAME,
- .id = 1,
- .num_resources = 2,
+ .id = 2,
+ .num_resources = 1,
.resource = orion_ge10_resources,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
@@ -425,7 +425,7 @@ static struct resource orion_ge11_shared_resources[] = {
static struct platform_device orion_ge11_shared = {
.name = MV643XX_ETH_SHARED_NAME,
- .id = 1,
+ .id = 3,
.dev = {
.platform_data = &orion_ge11_shared_data,
},
@@ -440,8 +440,8 @@ static struct resource orion_ge11_resources[] = {
static struct platform_device orion_ge11 = {
.name = MV643XX_ETH_NAME,
- .id = 1,
- .num_resources = 2,
+ .id = 3,
+ .num_resources = 1,
.resource = orion_ge11_resources,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index f980cf3d2ba..5d205e74e49 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <mach/map.h>
@@ -23,8 +24,6 @@
#include <plat/irq-vic-timer.h>
#include <plat/regs-timer.h>
-#include <asm/mach/irq.h>
-
static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
diff --git a/arch/arm/plat-samsung/s5p-irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
index bae56131a50..fafdb059043 100644
--- a/arch/arm/plat-samsung/s5p-irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -22,8 +23,6 @@
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
-#include <asm/mach/irq.h>
-
#define GPIO_BASE(chip) ((void __iomem *)((unsigned long)((chip)->base) & 0xFFFFF000u))
#define CON_OFFSET 0x700
diff --git a/arch/arm/plat-samsung/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
index 14745932760..66df315990a 100644
--- a/arch/arm/plat-samsung/setup-mipiphy.c
+++ b/arch/arm/plat-samsung/setup-mipiphy.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -50,8 +51,10 @@ int s5p_csis_phy_enable(int id, bool on)
{
return __s5p_mipi_phy_control(id, on, S5P_MIPI_DPHY_SRESETN);
}
+EXPORT_SYMBOL(s5p_csis_phy_enable);
int s5p_dsim_phy_enable(struct platform_device *pdev, bool on)
{
return __s5p_mipi_phy_control(pdev->id, on, S5P_MIPI_DPHY_MRESETN);
}
+EXPORT_SYMBOL(s5p_dsim_phy_enable);
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index f2ac1556177..1e1b2d76974 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -14,7 +14,6 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
@@ -37,13 +36,6 @@ static DEFINE_SPINLOCK(boot_lock);
void __cpuinit versatile_secondary_init(unsigned int cpu)
{
/*
- * if any interrupts are already enabled for the primary
- * core (e.g. timer irq), then they will not have been enabled
- * for us: do so
- */
- gic_secondary_init(0);
-
- /*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8dc0605a9ce..99ce18915a8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -239,7 +239,7 @@ static int __init xen_init_events(void)
xen_init_IRQ();
if (request_percpu_irq(xen_events_irq, xen_arm_callback,
- "events", xen_vcpu)) {
+ "events", &xen_vcpu)) {
pr_err("Error requesting IRQ %d\n", xen_events_irq);
return -EINVAL;
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 0c3ba9f5137..f4726dc054b 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
*/
static void clear_os_lock(void *unused)
{
- asm volatile("msr mdscr_el1, %0" : : "r" (0));
- isb();
asm volatile("msr oslar_el1, %0" : : "r" (0));
isb();
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b3c5f628bdb..671136e1a6f 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -317,14 +317,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
handler[reason], esr);
+ __show_regs(regs);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = pc;
- die("Oops - bad mode", regs, 0);
- local_irq_disable();
- panic("bad mode");
+ arm64_notify_die("Oops - bad mode", regs, &info, 0);
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index abe69b80cf7..48a386094fa 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -52,7 +52,7 @@ loop1:
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz x5, x4 // find bit position of way size increment
+ clz w5, w4 // find bit position of way size increment
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract max number of the index size
loop2:
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index afadae6682e..0782eaf4913 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
#define VM_FAULT_BADACCESS 0x020000
#define ESR_WRITE (1 << 6)
+#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
/*
@@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- int write = esr & ESR_WRITE;
+ bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f1d8b9bbfda..a82ae886807 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- mov x0, #1
- msr oslar_el1, x0 // Set the debug OS lock
+ msr mdscr_el1, xzr // Reset mdscr_el1
tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index 0421498d666..97918204f79 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -122,7 +122,6 @@ CONFIG_USB_G_SERIAL=m
CONFIG_USB_CDC_COMPOSITE=m
CONFIG_MMC=y
CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_ATMELMCI_DMA=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_ATMEL_PWM=m
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 3befab96682..65de4431108 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -102,7 +102,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_MMC=y
CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_ATMELMCI_DMA=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_ATMEL_PWM=y
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 596f7305d93..2c941290802 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
break;
case R_AVR32_GOT18SW:
if ((relocation & 0xfffe0003) != 0
- && (relocation & 0xfffc0003) != 0xffff0000)
+ && (relocation & 0xfffc0000) != 0xfffc0000)
return reloc_overflow(module, "R_AVR32_GOT18SW",
relocation);
relocation >>= 2;
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index d2bf1fd5e44..76acbcd5c06 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
{
- register unsigned long r8 __asm ("r8");
+ register unsigned long r8 __asm ("r8") = 0;
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov %0=r0 \n"
" mov ar.ccv=%4;; \n"
"[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (r8), "=r" (prev)
+ : "+r" (r8), "=&r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa..8c709616871 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
extern int cpe_vector;
extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613e..f2c41828113 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <asm/mca.h>
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
#endif /* CONFIG_SMP */
+int __init arch_early_irq_init(void)
+{
+ ia64_mca_irq_init();
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd3904..d7396dbb07b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
printk(KERN_INFO "MCA related initialization done\n");
}
+
/*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
*/
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
{
- if (!mca_init)
- return 0;
-
/*
* Configure the CMCI/P vector and handler. Interrupts for CMC are
* per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+ if (!mca_init)
+ return 0;
register_hotcpu_notifier(&mca_cpu_notifier);
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7ee520..a7869f8f49a 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"srlz.d;;"
"ssm psr.i;;"
"srlz.d;;"
- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
return ret;
}
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index d197e7ff62c..ac85f16534a 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -2752,11 +2752,9 @@ func_return get_new_page
#ifdef CONFIG_MAC
L(scc_initable_mac):
- .byte 9,12 /* Reset */
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
- .byte 9,0 /* no interrupts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,1,13,0 /* 38400 baud */
@@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
is_not_mac(L(serial_init_not_mac))
#ifdef SERIAL_DEBUG
+
/* You may define either or both of these. */
#define MAC_USE_SCC_A /* Modem port */
#define MAC_USE_SCC_B /* Printer port */
@@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1
#define mac_scc_cha_b_data_offset 0x4
#define mac_scc_cha_a_data_offset 0x6
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+ movel %pc@(L(mac_sccbase)),%a0
+ /* Reset SCC device */
+ moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
+ moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+ /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+ /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+ movel #35,%d0
+5:
+ subq #1,%d0
+ jne 5b
+#endif
+
#ifdef MAC_USE_SCC_A
/* Initialize channel A */
- movel %pc@(L(mac_sccbase)),%a0
lea %pc@(L(scc_initable_mac)),%a1
5: moveb %a1@+,%d0
jmi 6f
@@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1
#ifdef MAC_USE_SCC_B
/* Initialize channel B */
-#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
- movel %pc@(L(mac_sccbase)),%a0
-#endif /* MAC_USE_SCC_A */
lea %pc@(L(scc_initable_mac)),%a1
7: moveb %a1@+,%d0
jmi 8f
@@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
jra 7b
8:
#endif /* MAC_USE_SCC_B */
+
#endif /* SERIAL_DEBUG */
jra L(serial_init_done)
@@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1
#ifdef SERIAL_DEBUG
-#ifdef MAC_USE_SCC_A
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
movel %pc@(L(mac_sccbase)),%a1
+#endif
+
+#ifdef MAC_USE_SCC_A
3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset)
jeq 3b
moveb %d0,%a1@(mac_scc_cha_a_data_offset)
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
-#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
- movel %pc@(L(mac_sccbase)),%a1
-#endif /* MAC_USE_SCC_A */
4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset)
jeq 4b
moveb %d0,%a1@(mac_scc_cha_b_data_offset)
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 113e2820650..197690068f8 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -23,26 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-MACHINE := $(shell uname -m)
-ifeq ($(MACHINE),parisc*)
-NATIVE := 1
-endif
-
ifdef CONFIG_64BIT
UTS_MACHINE := parisc64
CHECKFLAGS += -D__LP64__=1 -m64
-WIDTH := 64
+CC_ARCHES = hppa64
else # 32-bit
-WIDTH :=
+CC_ARCHES = hppa hppa2.0 hppa1.1
endif
-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE := hppa$(WIDTH)-linux-
-else
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
- endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+ ifeq ($(CROSS_COMPILE),)
+ CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+ CROSS_COMPILE := $(call cc-cross-prefix, \
+ $(foreach a,$(CC_ARCHES), \
+ $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+ endif
endif
OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f33201bf897..897bce412c5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -444,9 +444,41 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
+ /* Acquire pa_dbit_lock lock. */
+ .macro dbit_lock spc,tmp,tmp1
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,\spc,2f
+ load32 PA(pa_dbit_lock),\tmp
+1: LDCW 0(\tmp),\tmp1
+ cmpib,COND(=) 0,\tmp1,1b
+ nop
+2:
+#endif
+ .endm
+
+ /* Release pa_dbit_lock lock without reloading lock address. */
+ .macro dbit_unlock0 spc,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+#endif
+ .endm
+
+ /* Release pa_dbit_lock lock. */
+ .macro dbit_unlock1 spc,tmp
+#ifdef CONFIG_SMP
+ load32 PA(pa_dbit_lock),\tmp
+ dbit_unlock0 \spc,\tmp
+#endif
+ .endm
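
The lock these macros implement relies on PA-RISC ldcw semantics: the instruction atomically loads a word and stores zero, so a zero lock word means "held" and any nonzero value means "free"; unlock is a plain store of a nonzero value (the space id, above). A portable C model of that protocol, for illustration only:

#include <stdatomic.h>

static _Atomic unsigned long lock_word = 1;      /* nonzero: free */

static void model_dbit_lock(void)
{
	/* ldcw equivalent: swap in 0; we own the lock when the old
	 * value was nonzero */
	while (atomic_exchange(&lock_word, 0) == 0)
		;                                /* spin while held */
}

static void model_dbit_unlock(void)
{
	atomic_store(&lock_word, 1);             /* any nonzero releases */
}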
+
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
- .macro update_ptep ptep,pte,tmp,tmp1
+ .macro update_ptep spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
@@ -455,7 +487,11 @@
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
+ .macro update_dirty spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptep)
@@ -825,11 +861,6 @@ ENTRY(syscall_exit_rfi)
STREG %r19,PT_SR7(%r16)
intr_return:
- /* NOTE: Need to enable interrupts incase we schedule. */
- ssm PSW_SM_I, %r0
-
-intr_check_resched:
-
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -856,6 +887,11 @@ intr_check_sig:
LDREG PT_IASQ1(%r16), %r20
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+ /* NOTE: We need to enable interrupts if we have to deliver
+ * signals. We used to do this earlier but it caused kernel
+ * stack overflows. */
+ ssm PSW_SM_I, %r0
+
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
@@ -907,6 +943,10 @@ intr_do_resched:
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
+ /* NOTE: We need to enable interrupts if we schedule. We used
+ * to do this earlier but it caused kernel stack overflows. */
+ ssm PSW_SM_I, %r0
+
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1099,11 +1139,13 @@ dtlb_miss_20w:
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1123,11 +1165,13 @@ nadtlb_miss_20w:
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1149,7 +1193,8 @@ dtlb_miss_11:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1160,6 +1205,7 @@ dtlb_miss_11:
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1180,7 +1226,8 @@ nadtlb_miss_11:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1192,6 +1239,7 @@ nadtlb_miss_11:
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1212,13 +1260,15 @@ dtlb_miss_20:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1238,13 +1288,15 @@ nadtlb_miss_20:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1345,11 +1397,13 @@ itlb_miss_20w:
L3_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1367,11 +1421,13 @@ naitlb_miss_20w:
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1393,7 +1449,8 @@ itlb_miss_11:
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1404,6 +1461,7 @@ itlb_miss_11:
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1415,7 +1473,8 @@ naitlb_miss_11:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1426,6 +1485,7 @@ naitlb_miss_11:
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1447,13 +1507,15 @@ itlb_miss_20:
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1465,13 +1527,15 @@ naitlb_miss_20:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
@@ -1495,29 +1559,13 @@ dbit_trap_20w:
L3_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20w
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
- LDCW 0(t0),t1
- cmpib,COND(=) 0,t1,dbit_spin_20w
- nop
-
-dbit_nolock_20w:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20w
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1531,18 +1579,8 @@ dbit_trap_11:
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_11
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_11:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_11
- nop
-
-dbit_nolock_11:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
@@ -1553,13 +1591,7 @@ dbit_nolock_11:
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_11
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1571,32 +1603,15 @@ dbit_trap_20:
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nolock_20
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20
- nop
-
-dbit_nolock_20:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
f_extend pte,t1
idtlbt pte,prot
-
-#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,spc,dbit_nounlock_20
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1694,7 +1709,8 @@ ENTRY(sys_\name\()_wrapper)
ldo TASK_REGS(%r1),%r1
reg_save %r1
mfctl %cr27, %r28
- b sys_\name
+ ldil L%sys_\name, %r31
+ be R%sys_\name(%sr4,%r31)
STREG %r28, PT_CR27(%r1)
ENDPROC(sys_\name\()_wrapper)
.endm
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index fb3245e928e..6e11f78ebc7 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -175,6 +175,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_BCTAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@@ -391,19 +392,20 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
- CPU_FTR_HVMODE)
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -412,7 +414,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
- CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -427,14 +429,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
- CPU_FTR_UNALIGNED_LD_STD)
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE)
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_ICSWX | CPU_FTR_DABRX)
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 3d6b4100dac..ccfe2c83e7a 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,6 +29,7 @@ struct rtc_time;
struct file;
struct pci_controller;
struct kimage;
+struct pci_host_bridge;
struct machdep_calls {
char *name;
@@ -107,6 +108,8 @@ struct machdep_calls {
void (*pcibios_fixup)(void);
int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
+ int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ *bridge);
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 025a130729b..bb21f5a2ad4 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -154,6 +154,8 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
+ int force_32bit_msi:1;
+
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 8752bc8e34a..8cbc6e54e4c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -113,6 +113,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7ff9eaa3ea6..a7b42ca96ee 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -407,21 +407,16 @@ static inline void prefetchw(const void *x)
#endif
#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- unsigned long sp;
-
if (is_32)
- sp = regs->gpr[1] & 0x0ffffffffUL;
- else
- sp = regs->gpr[1];
-
+ return sp & 0x0ffffffffUL;
return sp;
}
#else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- return regs->gpr[1];
+ return sp;
}
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c9c67fc888c..3b097a84a63 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -111,17 +111,6 @@
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
-/* Reason codes describing kernel causes for transaction aborts. By
- convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
- the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED 0xfe
-#define TM_CAUSE_TLBI 0xfc
-#define TM_CAUSE_FAC_UNAV 0xfa
-#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */
-#define TM_CAUSE_MISC 0xf6
-#define TM_CAUSE_SIGNAL 0xf4
-
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index aef00c67590..ee38f29ef3e 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsigned short hex);
extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(struct rtas_args *);
struct rtc_time;
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index fbe66c46389..9322c28aebd 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -3,5 +3,8 @@
#define __ARCH_HAS_SA_RESTORER
#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 4b4449abf3f..9dfbc34bdbf 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -5,6 +5,8 @@
* Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
*/
+#include <uapi/asm/tm.h>
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void do_load_up_transact_fpu(struct thread_struct *thread);
extern void do_load_up_transact_altivec(struct thread_struct *thread);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca637074..5182c8622b5 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
+header-y += tm.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 00000000000..85059a00f56
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent. PAPR reserves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_RESCHED 0xde
+#define TM_CAUSE_TLBI 0xdc
+#define TM_CAUSE_FAC_UNAV 0xda
+#define TM_CAUSE_SYSCALL 0xd8 /* future use */
+#define TM_CAUSE_MISC 0xd6 /* future use */
+#define TM_CAUSE_SIGNAL 0xd4
+#define TM_CAUSE_ALIGNMENT 0xd2
+#define TM_CAUSE_EMULATE 0xd0
+
+#endif
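
Bit 0 is the only part of these codes with architected meaning to consumers: it is copied into TEXASR[56] and marks the failure persistent, so retry decisions reduce to one test. A hypothetical helper (not a kernel API):

#include <stdbool.h>

static inline bool tm_failure_persistent(unsigned int cause)
{
	return cause & TM_CAUSE_PERSISTENT;     /* set: do not retry */
}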
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index ea847abb0d0..116700886b3 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -66,6 +66,7 @@ _GLOBAL(__restore_cpu_power8)
bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
+ mtlr r11
beqlr
li r0,0
mtspr SPRN_LPID,r0
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e514de57a12..4498467ac38 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -851,7 +851,7 @@ resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
- andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 56bd92362ce..3bbe7edf639 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -797,7 +797,7 @@ hardware_interrupt_relon_hv:
_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
FTR_SECTION_ELSE
_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0886ae6dd5b..b61363d557b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -509,6 +509,7 @@ _GLOBAL(copy_and_flush)
sync
addi r5,r5,8
addi r6,r6,8
+ isync
blr
.align 8
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 466a2908bb6..611acdf3009 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
+#include <linux/hardirq.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
pr_debug("kexec: Starting switchover sequence.\n");
	/* switch to a statically allocated stack. Based on irq stack code.
+	 * We set up preempt_count to avoid using VMX in memcpy.
* XXX: the task struct will likely be invalid once we do the copy!
*/
kexec_stack.thread_info.task = current_thread_info()->task;
kexec_stack.thread_info.flags = 0;
+ kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+ kexec_stack.thread_info.cpu = current_thread_info()->cpu;
/* We need a static PACA, too; copy this CPU's PACA over and switch to
* it. Also poison per_cpu_offset to catch anyone using non-static
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index fa12ae42d98..9b315e5b1d9 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -844,6 +845,14 @@ int pci_proc_domain(struct pci_bus *bus)
return 1;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (ppc_md.pcibios_root_bridge_prepare)
+ return ppc_md.pcibios_root_bridge_prepare(bridge);
+
+ return 0;
+}
+
/* This header fixup will do the resource fixup for all devices as they are
* probed, but not for bridge ranges
*/
@@ -1725,3 +1734,15 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+static void fixup_vga(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
+ vga_set_default_device(pdev);
+
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
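
The fixup registered above picks a default VGA device for the arbiter on a simple rule: a device that is actively decoding (its command register has I/O or memory enabled, i.e. the one firmware set up) always claims the slot, while an inactive one claims it only as long as no default exists. A compact model of that rule (illustrative; vga_default_device()/vga_set_default_device() are modelled with a plain variable):

    #include <stdio.h>

    #define PCI_COMMAND_IO     0x1
    #define PCI_COMMAND_MEMORY 0x2

    static int default_vga = -1;   /* stand-in for the VGA arbiter state */

    /* Model of fixup_vga(): an enabled device always wins; a disabled
     * one is taken only while nothing has claimed the slot yet. */
    static void fixup_vga_model(int devno, unsigned short cmd)
    {
        if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || default_vga < 0)
            default_vga = devno;
    }

    int main(void)
    {
        fixup_vga_model(0, 0);                  /* disabled, but first: claims */
        fixup_vga_model(1, PCI_COMMAND_MEMORY); /* enabled: overrides */
        fixup_vga_model(2, 0);                  /* disabled, slot taken: kept */
        printf("default VGA device: %d\n", default_vga);  /* prints 1 */
        return 0;
    }
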
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 16e77a81ab4..9600c36f73b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -392,7 +392,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
mtspr(SPRN_DABR, dabr);
- mtspr(SPRN_DABRX, dabrx);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
return 0;
}
#else
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1fd6e7b2f39..52add6f3e20 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}
+enum rtas_cpu_state {
+ DOWN,
+ UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ if (!cpumask_empty(cpus)) {
+ cpumask_clear(cpus);
+ return -EINVAL;
+ } else
+ return 0;
+}
+#else
+/* On return the cpumask will be altered to indicate which CPUs changed state:
+ * CPUs whose state changed will be set in the mask,
+ * CPUs whose state was unchanged will be unset in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ int cpu;
+ int cpuret = 0;
+ int ret = 0;
+
+ if (cpumask_empty(cpus))
+ return 0;
+
+ for_each_cpu(cpu, cpus) {
+ switch (state) {
+ case DOWN:
+ cpuret = cpu_down(cpu);
+ break;
+ case UP:
+ cpuret = cpu_up(cpu);
+ break;
+ }
+ if (cpuret) {
+ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ __func__,
+ ((state == UP) ? "up" : "down"),
+ cpu, cpuret);
+ if (!ret)
+ ret = cpuret;
+ if (state == UP) {
+ /* clear bits for unchanged cpus, return */
+ cpumask_shift_right(cpus, cpus, cpu);
+ cpumask_shift_left(cpus, cpus, cpu);
+ break;
+ } else {
+ /* clear bit for unchanged cpu, continue */
+ cpumask_clear_cpu(cpu, cpus);
+ }
+ }
+ }
+
+ return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+ int ret;
+
+ ret = rtas_cpu_state_change_mask(UP, cpus);
+
+ if (ret) {
+ cpumask_var_t tmp_mask;
+
+ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+ return ret;
+
+ /* Use tmp_mask to preserve cpus mask from first failure */
+ cpumask_copy(tmp_mask, cpus);
+ rtas_offline_cpus_mask(tmp_mask);
+ free_cpumask_var(tmp_mask);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+ return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
int rtas_ibm_suspend_me(struct rtas_args *args)
{
long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
struct rtas_suspend_me_data data;
DECLARE_COMPLETION_ONSTACK(done);
+ cpumask_var_t offline_mask;
+ int cpuret;
if (!rtas_service_present("ibm,suspend-me"))
return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
return 0;
}
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
atomic_set(&data.working, 0);
atomic_set(&data.done, 0);
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ cpuret = rtas_online_cpus_mask(offline_mask);
+ if (cpuret) {
+ pr_err("%s: Could not bring present CPUs online.\n", __func__);
+ atomic_set(&data.error, cpuret);
+ goto out;
+ }
+
stop_topology_update();
/* Call function on all CPUs. One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
start_topology_update();
+ /* Take down CPUs not online prior to suspend */
+ cpuret = rtas_offline_cpus_mask(offline_mask);
+ if (cpuret)
+ pr_warn("%s: Could not restore CPUs to offline state.\n",
+ __func__);
+
+out:
+ free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
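
The intended calling pattern for the two new exports is the one rtas_ibm_suspend_me() follows above: collect the present-but-offline CPUs, online them for the duration of the operation, then feed the same mask back to restore the original state. A condensed kernel-style sketch of that pattern (illustrative only, with GFP_KERNEL standing in for the GFP_TEMPORARY the patch uses; real callers must also cope with the partial-failure semantics described above rtas_cpu_state_change_mask()):

    /* Illustrative sketch, not a real caller. */
    static int with_all_present_cpus_online(void (*fn)(void))
    {
        cpumask_var_t offline_mask;
        int rc;

        if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
            return -ENOMEM;

        /* CPUs that are present but not currently online */
        cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);

        rc = rtas_online_cpus_mask(offline_mask);
        if (!rc) {
            fn();   /* work that needs every present thread online */
            if (rtas_offline_cpus_mask(offline_mask))
                pr_warn("could not restore CPUs to offline state\n");
        }

        free_cpumask_var(offline_mask);
        return rc;
    }
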
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cf12eae02de..78760b81350 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -17,6 +17,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
+#include <asm/tm.h>
#include "signal.h"
@@ -29,13 +30,13 @@ int show_unhandled_signals = 0;
/*
* Allocate space for the signal frame
*/
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
/* Default to using normal stack */
- oldsp = get_clean_sp(regs, is_32);
+ oldsp = get_clean_sp(sp, is_32);
/* Check for alt stack */
if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -170,3 +171,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
tracehook_notify_resume(regs);
}
}
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+ /* When in an active transaction that takes a signal, we need to be
+ * careful with the stack. It's possible that the stack has moved back
+ * up after the tbegin. The obvious case here is when the tbegin is
+ * called inside a function that returns before a tend. In this case,
+ * the stack is part of the checkpointed transactional memory state.
+ * If we write over this non transactionally or in suspend, we are in
+	 * If we write over this non-transactionally or in suspend, we are in
+	 * trouble because if we get a tm abort, the program counter and stack
+	 * pointer will be back at the tbegin but our in-memory stack won't be
+ *
+ * To avoid this, when taking a signal in an active transaction, we
+ * need to use the stack pointer from the checkpointed state, rather
+ * than the speculated state. This ensures that the signal context
+	 * (written while tm suspended) will be written below the stack required
+	 * for the rollback. The transaction is aborted because of the treclaim,
+ * so any memory written between the tbegin and the signal will be
+ * rolled back anyway.
+ *
+ * For signals taken in non-TM or suspended mode, we use the
+ * normal/non-checkpointed stack pointer.
+ */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ tm_enable();
+ tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
+ return current->thread.ckpt_regs.gpr[1];
+ }
+#endif
+ return regs->gpr[1];
+}
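
The hazard the comment above describes is easiest to see in a toy program: once tbegin. executes inside a function that returns, the checkpointed stack pointer refers to a frame that has already been popped, and a rollback restores SP/PC into it. A hedged userspace illustration using GCC's HTM builtins (assumes a POWER8-class CPU and a toolchain supporting -mhtm; the builtins are standard GCC interfaces, not kernel ones):

    #include <htmintrin.h>  /* GCC HTM builtins; compile with -mhtm */
    #include <stdio.h>

    static int __attribute__((noinline)) start_transaction(void)
    {
        /* The checkpointed SP is the one live in this frame. */
        return __builtin_tbegin(0);
    }

    int main(void)
    {
        if (start_transaction()) {
            /*
             * Transactional path: start_transaction() has returned, so
             * the stack has popped back above the checkpointed SP.  The
             * popped frame is still needed for the rollback, so a signal
             * frame must be written below the checkpointed SP rather
             * than just below the current one -- which is what
             * get_tm_stackpointer() arranges by returning
             * ckpt_regs.gpr[1] for active transactions.
             */
            __builtin_tend(0);
            printf("transaction committed\n");
        } else {
            printf("transaction aborted and rolled back\n");
        }
        return 0;
    }
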
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index ec84c901cea..c69b9aeb9f2 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32);
extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 95068bf569a..201385c3a1a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
{
unsigned long msr = regs->msr;
- /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
- * thread.transact_fpr[], thread.transact_vr[], etc.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* Set up Signal Frame */
/* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+ rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
addr = rt_sf;
if (unlikely(rt_sf == NULL))
goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
unsigned long tramp;
/* Set up Signal Frame */
- frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
if (unlikely(frame == NULL))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c1794286098..345947367ec 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* As above, but Transactional Memory is in use, so deliver sigcontexts
* containing checkpointed and transactional register states.
*
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state. If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
*/
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
- /* tm_reclaim rolls back all reg states, saving checkpointed (older)
- * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
- * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
- * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
- * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the
- * stack, in *regs.
- */
- tm_enable();
- tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
unsigned long newsp = 0;
long err = 0;
- frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
if (unlikely(frame == NULL))
goto badframe;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37cc40ef504..29857c6b6af 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -52,6 +52,7 @@
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
+#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
@@ -913,6 +914,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+ /* If we're emulating a load/store in an active transaction, we cannot
+ * emulate it as the kernel operates in transaction suspended context.
+	 * We need to abort the transaction. This creates a persistent TM
+	 * abort, so tell the user what caused it with a new failure code.
+ */
+ if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+ tm_enable();
+ tm_abort(cause);
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+ return false;
+}
+#endif
+
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
@@ -952,6 +975,9 @@ static int emulate_instruction(struct pt_regs *regs)
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+ if (tm_abort_check(regs,
+ TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+ return -EINVAL;
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
}
@@ -970,7 +996,10 @@ static int emulate_instruction(struct pt_regs *regs)
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -978,7 +1007,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -1118,6 +1150,9 @@ void alignment_exception(struct pt_regs *regs)
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
+ if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+ return;
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f410c3e12c1..b75c52ff42c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1191,6 +1191,7 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
* unmapping it first, it may see the speculated version.
*/
if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
MSR_TM_ACTIVE(current->thread.regs->msr)) {
tm_enable();
tm_abort(TM_CAUSE_TLBI);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca2b4d..6a252c468d6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 65362e98eb2..9d858b0aeb5 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1528,7 +1528,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
}
}
}
- if ((!found) && printk_ratelimit())
+ if (!found && !nmi && printk_ratelimit())
printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3f3bb4cdbbe..35f77a42bed 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -99,6 +99,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
if (!inode)
goto out;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9a0941bc4d3..b9fd0d31d80 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -18,6 +18,8 @@ config PPC_PSERIES
select PPC_PCI_CHOICE if EXPERT
select ZLIB_DEFLATE
select PPC_DOORBELL
+ select HOTPLUG if SMP
+ select HOTPLUG_CPU if SMP
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f93573..b456b157d33 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
- /* necessary sanity check */
+ /*
+	 * Necessary sanity check. We needn't check "get-config-addr-info"
+	 * and its variant since old firmware probably supports addressing
+	 * by domain/bus/slot/function for EEH RTAS operations.
+ */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
- } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
- ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
- "<ibm,get-config-addr-info> invalid\n",
- __func__);
- return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index e5b08472313..77b18a2fe30 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -24,6 +24,7 @@ static int query_token, change_token;
#define RTAS_RESET_FN 2
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
+#define RTAS_CHANGE_32MSI_FN 5
static struct pci_dn *get_pdn(struct pci_dev *pdev)
{
@@ -58,7 +59,8 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
seq_num = 1;
do {
- if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
+ if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
+ func == RTAS_CHANGE_32MSI_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
@@ -392,6 +394,25 @@ static int check_msix_entries(struct pci_dev *pdev)
return 0;
}
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+ u32 addr_hi, addr_lo;
+ int pos;
+
+ /*
+ * We should only get in here for IODA1 configs. This is based on the
+	 * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI
+	 * RTAS support, and we are in a PCIe Gen2 slot.
+ */
+ dev_info(&pdev->dev,
+ "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &addr_hi);
+ addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+ pci_write_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, addr_lo);
+ pci_write_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, 0);
+}
+
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
@@ -399,6 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
struct msi_desc *entry;
struct msi_msg msg;
int nvec = nvec_in;
+ int use_32bit_msi_hack = 0;
pdn = get_pdn(pdev);
if (!pdn)
@@ -426,12 +448,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
again:
if (type == PCI_CAP_ID_MSI) {
- rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
+ if (pdn->force_32bit_msi) {
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+ if (rc < 0) {
+ /*
+ * We only want to run the 32 bit MSI hack below if
+				 * the max bus speed is Gen2
+ */
+ if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+ return rc;
+
+ use_32bit_msi_hack = 1;
+ }
+ } else
+ rc = -1;
+
+ if (rc < 0)
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
if (rc < 0) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
+
+ if (use_32bit_msi_hack && rc > 0)
+ rtas_hack_32bit_msi_gen2(pdev);
} else
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
@@ -512,3 +553,13 @@ static int rtas_msi_init(void)
return 0;
}
arch_initcall(rtas_msi_init);
+
+static void quirk_radeon(struct pci_dev *dev)
+{
+ struct pci_dn *pdn = get_pdn(dev);
+
+ if (pdn)
+ pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
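
The one non-obvious line in rtas_hack_32bit_msi_gen2() is the address fold: addr_hi holds bits 63:32 of the 64-bit MSI target the firmware configured, so `addr_hi >> (48 - 32)` extracts address bits 63:48 and the `<< 4` repacks them into bits 19:4 of a 32-bit address inside the 0xffff0000 window. A self-checking model of just that arithmetic (the constants mirror the patch; the meaning of the window itself is firmware-specific and stated here as an assumption):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same computation as rtas_hack_32bit_msi_gen2(). */
    static uint32_t fold_msi_address(uint32_t addr_hi)
    {
        return 0xffff0000u | ((addr_hi >> (48 - 32)) << 4);
    }

    int main(void)
    {
        /* 64-bit MSI target 0x0002_0000_0000_0000 -> addr_hi = 0x00020000 */
        uint32_t lo = fold_msi_address(0x00020000);

        printf("addr_lo = 0x%08x\n", lo);
        assert(lo == 0xffff0020u);  /* bits 63:48 (0x0002) now at bits 19:4 */
        return 0;
    }
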
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 0b580f413a9..5f93856cdf4 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -108,3 +108,56 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
+
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct device_node *dn, *pdn;
+ struct pci_bus *bus;
+ const uint32_t *pcie_link_speed_stats;
+
+ bus = bridge->bus;
+
+ dn = pcibios_get_phb_of_node(bus);
+ if (!dn)
+ return 0;
+
+ for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
+ pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+ "ibm,pcie-link-speed-stats", NULL);
+ if (pcie_link_speed_stats)
+ break;
+ }
+
+ of_node_put(pdn);
+
+ if (!pcie_link_speed_stats) {
+ pr_err("no ibm,pcie-link-speed-stats property\n");
+ return 0;
+ }
+
+ switch (pcie_link_speed_stats[0]) {
+ case 0x01:
+ bus->max_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (pcie_link_speed_stats[1]) {
+ case 0x01:
+ bus->cur_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->cur_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
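
pseries_root_bridge_prepare() is what makes the PCIE_SPEED_5_0GT test in the MSI quirk above meaningful: it walks up from the PHB node looking for "ibm,pcie-link-speed-stats", a property of two 32-bit cells <max current>, where 0x01 means Gen1 (2.5 GT/s) and 0x02 means Gen2 (5.0 GT/s). A standalone model of the decode (the enum values are stand-ins for the kernel's PCIE_SPEED_*/PCI_SPEED_UNKNOWN constants):

    #include <stdint.h>
    #include <stdio.h>

    enum bus_speed { SPEED_UNKNOWN, SPEED_2_5GT, SPEED_5_0GT };

    static enum bus_speed decode_link_speed(uint32_t cell)
    {
        switch (cell) {
        case 0x01: return SPEED_2_5GT;  /* PCIe Gen1 */
        case 0x02: return SPEED_5_0GT;  /* PCIe Gen2 */
        default:   return SPEED_UNKNOWN;
        }
    }

    int main(void)
    {
        /* property value as it would appear in the device tree: <max cur> */
        uint32_t pcie_link_speed_stats[2] = { 0x02, 0x01 };

        printf("max=%d cur=%d\n",
               decode_link_speed(pcie_link_speed_stats[0]),
               decode_link_speed(pcie_link_speed_stats[1]));
        return 0;
    }
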
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9a3dda07566..b79393d8e50 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -60,4 +60,8 @@ extern int dlpar_detach_node(struct device_node *);
/* Snooze Delay, pseries_idle */
DECLARE_PER_CPU(long, smt_snooze_delay);
+/* PCI root bridge prepare function override for pseries */
+struct pci_host_bridge;
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8bcc9ca6682..bf34cc97626 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -466,6 +466,8 @@ static void __init pSeries_setup_arch(void)
else
ppc_md.enable_pmcs = power4_enable_pmcs;
+ ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
long rc;
if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) {
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126..5f997e79d57 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ cpumask_var_t offline_mask;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
stream_id = simple_strtoul(buf, NULL, 16);
do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
} while (rc == -EAGAIN);
if (!rc) {
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask,
+ cpu_online_mask);
+ rc = rtas_online_cpus_mask(offline_mask);
+ if (rc) {
+ pr_err("%s: Could not bring present CPUs online.\n",
+ __func__);
+ goto out;
+ }
+
stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
start_topology_update();
+
+ /* Take down CPUs not online prior to suspend */
+		if (rtas_offline_cpus_mask(offline_mask))
+ pr_warn("%s: Could not restore CPUs to offline "
+ "state.\n", __func__);
}
stream_id = 0;
if (!rc)
rc = count;
+out:
+ free_cpumask_var(offline_mask);
return rc;
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3cb47cf0253..39a1c7103fc 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -637,7 +637,7 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
unsigned long address, bits;
unsigned char skey;
- if (!pte_present(*ptep))
+ if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
address = pte_val(*ptep) & PAGE_MASK;
skey = page_get_storage_key(address);
@@ -671,7 +671,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
#ifdef CONFIG_PGSTE
int young;
- if (!pte_present(*ptep))
+ if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
/* Get referenced bit from storage key */
young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
@@ -697,7 +697,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
unsigned long address;
unsigned long okey, nkey;
- if (!pte_present(entry))
+ if (pte_val(entry) & _PAGE_INVALID)
return;
address = pte_val(entry) & PAGE_MASK;
okey = nkey = page_get_storage_key(address);
@@ -1063,15 +1063,19 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
+ pgste_t pgste;
pte_t pte;
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
- pgste_get_lock(ptep);
+ pgste = pgste_get_lock(ptep);
pte = *ptep;
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
+
+ if (mm_has_pgste(mm))
+ pgste = pgste_update_all(&pte, pgste);
return pte;
}
@@ -1079,9 +1083,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long address,
pte_t *ptep, pte_t pte)
{
+ pgste_t pgste;
+
if (mm_has_pgste(mm)) {
+ pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+ pgste_set_key(ptep, pgste, pte);
pgste_set_pte(ptep, pte);
- pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+ pgste_set_unlock(ptep, pgste);
} else
*ptep = pte;
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 25877aebc68..41a2a0becc1 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -368,11 +368,17 @@ config HARDWALL
config KERNEL_PL
int "Processor protection level for kernel"
range 1 2
- default "1"
+ default 2 if TILEGX
+ default 1 if !TILEGX
---help---
- This setting determines the processor protection level the
- kernel will be built to run at. Generally you should use
- the default value here.
+ Since MDE 4.2, the Tilera hypervisor runs the kernel
+ at PL2 by default. If running under an older hypervisor,
+ or as a KVM guest, you must run at PL1. (The current
+ hypervisor may also be recompiled with "make HV_PL=2" to
+ allow it to run a kernel at PL1, but clients running at PL1
+ are not expected to be supported indefinitely.)
+
+ If you're not sure, don't change the default.
source "arch/tile/gxio/Kconfig"
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index ccd847e2347..837dca5328c 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -107,7 +107,22 @@
#define HV_DISPATCH_ENTRY_SIZE 32
/** Version of the hypervisor interface defined by this file */
-#define _HV_VERSION 11
+#define _HV_VERSION 13
+
+/** Last version of the hypervisor interface with old hv_init() ABI.
+ *
+ * The change from version 12 to version 13 corresponds to launching
+ * the client by default at PL2 instead of PL1 (corresponding to the
+ * hv itself running at PL3 instead of PL2). To make this explicit,
+ * the hv_init() API was also extended so the client can report its
+ * desired PL, resulting in a more helpful failure diagnostic. If you
+ * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
+ * argument, the hypervisor will assume client_pl = 1.
+ *
+ * Note that this is a deprecated solution and we do not expect to
+ * support clients of the Tilera hypervisor running at PL1 indefinitely.
+ */
+#define _HV_VERSION_OLD_HV_INIT 12
/* Index into hypervisor interface dispatch code blocks.
*
@@ -377,7 +392,11 @@ typedef int HV_Errno;
#ifndef __ASSEMBLER__
/** Pass HV_VERSION to hv_init to request this version of the interface. */
-typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
+typedef enum {
+ HV_VERSION = _HV_VERSION,
+ HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
+
+} HV_VersionNumber;
/** Initializes the hypervisor.
*
@@ -385,9 +404,11 @@ typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
* that this program expects, typically HV_VERSION.
* @param chip_num Architecture number of the chip the client was built for.
* @param chip_rev_num Revision number of the chip the client was built for.
+ * @param client_pl Privilege level the client is built for
+ * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
*/
void hv_init(HV_VersionNumber interface_version_number,
- int chip_num, int chip_rev_num);
+ int chip_num, int chip_rev_num, int client_pl);
/** Queries we can make for hv_sysconf().
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index f71bfeeaf1a..ac115307e5e 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -38,7 +38,7 @@ ENTRY(_start)
movei r2, TILE_CHIP_REV
}
{
- moveli r0, _HV_VERSION
+ moveli r0, _HV_VERSION_OLD_HV_INIT
jal hv_init
}
/* Get a reasonable default ASID in r0 */
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index f9a2734f7b8..6093964fa5c 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -34,13 +34,19 @@
ENTRY(_start)
/* Notify the hypervisor of what version of the API we want */
{
+#if KERNEL_PL == 1 && _HV_VERSION == 13
+ /* Support older hypervisors by asking for API version 12. */
+ movei r0, _HV_VERSION_OLD_HV_INIT
+#else
+ movei r0, _HV_VERSION
+#endif
movei r1, TILE_CHIP
- movei r2, TILE_CHIP_REV
}
{
- moveli r0, _HV_VERSION
- jal hv_init
+ movei r2, TILE_CHIP_REV
+ movei r3, KERNEL_PL
}
+ jal hv_init
/* Get a reasonable default ASID in r0 */
{
move r0, zero
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15b5cef4aa3..6ef2a378e3a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -107,7 +107,6 @@ config X86
select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
select GENERIC_TIME_VSYSCALL if X86_64
select KTIME_SCALAR if X86_32
- select ALWAYS_USE_PERSISTENT_CLOCK
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index c8335014a04..c18c3984d50 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -241,6 +241,6 @@ fold_64:
pand %xmm3, %xmm1
PCLMULQDQ 0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
- pextrd $0x01, %xmm1, %eax
+ PEXTRD 0x01, %xmm1, %eax
ret
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index cf1a7ec4cc3..a59c64311d4 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -42,6 +42,7 @@
* SOFTWARE.
*/
+#include <asm/inst.h>
#include <linux/linkage.h>
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
@@ -225,10 +226,10 @@ LABEL crc_ %i
movdqa (bufp), %xmm0 # 2 consts: K1:K2
movq crc_init, %xmm1 # CRC for block 1
- pclmulqdq $0x00,%xmm0,%xmm1 # Multiply by K2
+ PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2
movq crc1, %xmm2 # CRC for block 2
- pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1
+ PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1
pxor %xmm2,%xmm1
movq %xmm1, %rax
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 280bf7fb6ab..3e115273ed8 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -9,12 +9,68 @@
#define REG_NUM_INVALID 100
-#define REG_TYPE_R64 0
-#define REG_TYPE_XMM 1
+#define REG_TYPE_R32 0
+#define REG_TYPE_R64 1
+#define REG_TYPE_XMM 2
#define REG_TYPE_INVALID 100
+ .macro R32_NUM opd r32
+ \opd = REG_NUM_INVALID
+ .ifc \r32,%eax
+ \opd = 0
+ .endif
+ .ifc \r32,%ecx
+ \opd = 1
+ .endif
+ .ifc \r32,%edx
+ \opd = 2
+ .endif
+ .ifc \r32,%ebx
+ \opd = 3
+ .endif
+ .ifc \r32,%esp
+ \opd = 4
+ .endif
+ .ifc \r32,%ebp
+ \opd = 5
+ .endif
+ .ifc \r32,%esi
+ \opd = 6
+ .endif
+ .ifc \r32,%edi
+ \opd = 7
+ .endif
+#ifdef CONFIG_X86_64
+ .ifc \r32,%r8d
+ \opd = 8
+ .endif
+ .ifc \r32,%r9d
+ \opd = 9
+ .endif
+ .ifc \r32,%r10d
+ \opd = 10
+ .endif
+ .ifc \r32,%r11d
+ \opd = 11
+ .endif
+ .ifc \r32,%r12d
+ \opd = 12
+ .endif
+ .ifc \r32,%r13d
+ \opd = 13
+ .endif
+ .ifc \r32,%r14d
+ \opd = 14
+ .endif
+ .ifc \r32,%r15d
+ \opd = 15
+ .endif
+#endif
+ .endm
+
.macro R64_NUM opd r64
\opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
.ifc \r64,%rax
\opd = 0
.endif
@@ -63,6 +119,7 @@
.ifc \r64,%r15
\opd = 15
.endif
+#endif
.endm
.macro XMM_NUM opd xmm
@@ -118,10 +175,13 @@
.endm
.macro REG_TYPE type reg
+ R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
+ .elseif reg_type_r32 <> REG_NUM_INVALID
+ \type = REG_TYPE_R32
.elseif reg_type_xmm <> REG_NUM_INVALID
\type = REG_TYPE_XMM
.else
@@ -162,6 +222,16 @@
.byte \imm8
.endm
+ .macro PEXTRD imm8 xmm gpr
+ R32_NUM extrd_opd1 \gpr
+ XMM_NUM extrd_opd2 \xmm
+ PFX_OPD_SIZE
+ PFX_REX extrd_opd1 extrd_opd2
+ .byte 0x0f, 0x3a, 0x16
+ MODRM 0xc0 extrd_opd1 extrd_opd2
+ .byte \imm8
+ .endm
+
.macro AESKEYGENASSIST rcon xmm1 xmm2
XMM_NUM aeskeygen_opd1 \xmm1
XMM_NUM aeskeygen_opd2 \xmm2
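
The new PEXTRD macro hand-assembles the SSE4.1 instruction for binutils that lack the mnemonic, and the bytes it emits are easy to verify. For `PEXTRD 0x01, %xmm1, %eax`: R32_NUM resolves %eax to 0, XMM_NUM resolves %xmm1 to 1, PFX_OPD_SIZE emits 0x66, PFX_REX emits nothing (both operand numbers are below 8), and MODRM 0xc0 packs r/m = 0 and reg = 1 into 0xc8 -- the same 66 0f 3a 16 c8 01 a current assembler produces for `pextrd $1, %xmm1, %eax`. A small self-check of the ModRM arithmetic (mirrors the MODRM macro defined earlier in this file):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the MODRM macro: mod | (opd1 & 7) | ((opd2 & 7) << 3),
     * where opd1 fills the r/m field and opd2 the reg field. */
    static uint8_t modrm(uint8_t mod, uint8_t opd1, uint8_t opd2)
    {
        return mod | (opd1 & 7) | ((opd2 & 7) << 3);
    }

    int main(void)
    {
        /* PEXTRD $1, %xmm1, %eax -> 66 0f 3a 16 c8 01 */
        const uint8_t expected[] = { 0x66, 0x0f, 0x3a, 0x16, 0xc8, 0x01 };
        uint8_t encoded[] = { 0x66, 0x0f, 0x3a, 0x16,
                              modrm(0xc0, /*eax*/0, /*xmm1*/1), 0x01 };

        for (unsigned i = 0; i < sizeof(expected); i++)
            assert(encoded[i] == expected[i]);
        printf("PEXTRD encoding checks out\n");
        return 0;
    }
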
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 6cf0a9cc60c..5a0be0af46c 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,8 +37,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
unsigned long sys_sigreturn(void);
/* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
#else /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cc45deb791b..4a0a462d5e9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -125,10 +125,15 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ /*
+ * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
+ * siblings; disable these events because they can corrupt unrelated
+ * counters.
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index da02e9cc375..d978353c939 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_USER)
mask |= X86_BR_USER;
- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
+ if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
mask |= X86_BR_KERNEL;
+ }
/* we ignore BRANCH_HV here */
@@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+
+ return 0;
}
/*
@@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;
/*
* setup HW LBR filter, if any
@@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
return X86_BR_NONE;
addr = buf;
- } else
- addr = (void *)from;
+ } else {
+ /*
+ * The LBR logs any address in the IP, even if the IP just
+ * faulted. This means userspace can control the from address.
+		 * Ensure we don't blindly read any address by validating it is
+ * a known text address.
+ */
+ if (kernel_text_address(from))
+ addr = (void *)from;
+ else
+ return X86_BR_NONE;
+ }
/*
* decoder needs to know the ABI especially
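
The privilege check added to intel_pmu_setup_sw_lbr_filter() is visible from userspace: an unprivileged perf_event_open() requesting PERF_SAMPLE_BRANCH_KERNEL now fails with EACCES whenever perf_event_paranoid forbids kernel profiling. A hedged sketch of such a request (standard perf_event_open usage; whether the call actually fails depends on the running kernel's paranoid setting and the caller's capabilities):

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL;

        /* pid = 0 (self), cpu = -1 (any), no group leader, no flags */
        long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            perror("perf_event_open");  /* EACCES without privileges */
        else
            printf("kernel branch sampling permitted\n");
        return 0;
    }
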
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200dbfe7..3e091f04487 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
static int __init uncore_type_init(struct intel_uncore_type *type)
{
struct intel_uncore_pmu *pmus;
- struct attribute_group *events_group;
+ struct attribute_group *attr_group;
struct attribute **attrs;
int i, j;
@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
while (type->event_descs[i].attr.attr.name)
i++;
- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
- sizeof(*events_group), GFP_KERNEL);
- if (!events_group)
+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+ sizeof(*attr_group), GFP_KERNEL);
+ if (!attr_group)
goto fail;
- attrs = (struct attribute **)(events_group + 1);
- events_group->name = "events";
- events_group->attrs = attrs;
+ attrs = (struct attribute **)(attr_group + 1);
+ attr_group->name = "events";
+ attr_group->attrs = attrs;
for (j = 0; j < i; j++)
attrs[j] = &type->event_descs[j].attr.attr;
- type->events_group = events_group;
+ type->events_group = attr_group;
}
type->pmu_group = &uncore_pmu_attr_group;
@@ -2853,6 +2853,7 @@ static int __init uncore_cpu_init(void)
msr_uncores = nhm_msr_uncores;
break;
case 42: /* Sandy Bridge */
+ case 58: /* Ivy Bridge */
if (snb_uncore_cbox.num_boxes > max_cores)
snb_uncore_cbox.num_boxes = max_cores;
msr_uncores = snb_msr_uncores;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d86..8f3201d59b1 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,6 +34,7 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
@@ -99,7 +100,7 @@ again:
pmd_p[i] = 0;
*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
}
- pmd = (physaddr & PMD_MASK) + (__PAGE_KERNEL_LARGE & ~_PAGE_GLOBAL);
+ pmd = (physaddr & PMD_MASK) + early_pmd_flags;
pmd_p[pmd_index(address)] = pmd;
return 0;
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 6859e962644..321d65ebaff 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -115,8 +115,10 @@ startup_64:
movq %rdi, %rax
shrq $PUD_SHIFT, %rax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, (4096+0)(%rbx,%rax,8)
- movq %rdx, (4096+8)(%rbx,%rax,8)
+ movq %rdx, 4096(%rbx,%rax,8)
+ incl %eax
+ andl $(PTRS_PER_PUD-1), %eax
+ movq %rdx, 4096(%rbx,%rax,8)
addq $8192, %rbx
movq %rdi, %rax
@@ -200,6 +202,7 @@ ENTRY(secondary_startup_64)
btl $20,%edi /* No Execute supported? */
jnc 1f
btsl $_EFER_NX, %eax
+ btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
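
The two extra instructions in the page-table setup above fix a wraparound: two consecutive PUD slots are populated for the possibly-misaligned kernel image, and when the first index computes to slot 511 the old `(4096+8)(%rbx,%rax,8)` store landed one entry past the page instead of wrapping to slot 0 as the new incl/andl pair ensures. The index arithmetic in isolation (assuming PTRS_PER_PUD = 512, as on x86-64):

    #include <assert.h>
    #include <stdio.h>

    #define PTRS_PER_PUD 512

    int main(void)
    {
        unsigned idx = 511;  /* worst case: last PUD slot */

        /* old code: wrote idx and idx+1 without re-masking -> slot 512, OOB */
        unsigned bad_second = idx + 1;
        assert(bad_second == PTRS_PER_PUD);  /* one past the table */

        /* new code: incl %eax; andl $(PTRS_PER_PUD-1), %eax */
        unsigned good_second = (idx + 1) & (PTRS_PER_PUD - 1);
        assert(good_second == 0);  /* wraps to slot 0 */

        printf("second entry: old=%u (out of bounds), new=%u\n",
               bad_second, good_second);
        return 0;
    }
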
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 245a71db401..cb339097b9e 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -22,23 +22,19 @@
/*
* Were we in an interrupt that interrupted kernel mode?
*
- * For now, with eagerfpu we will return interrupted kernel FPU
- * state as not-idle. TBD: Ideally we can change the return value
- * to something like __thread_has_fpu(current). But we need to
- * be careful of doing __thread_clear_has_fpu() before saving
- * the FPU etc for supporting nested uses etc. For now, take
- * the simple route!
- *
* On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
* pair does nothing at all: the thread must not have fpu (so
* that we don't try to save the FPU state), and TS must
* be set (so that the clts/stts pair does nothing that is
* visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case, where we return 1 unless we've already
+ * been eager and saved the state in kernel_fpu_begin().
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
if (use_eager_fpu())
- return 0;
+ return __thread_has_fpu(current);
return !__thread_has_fpu(current) &&
(read_cr0() & X86_CR0_TS);
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void)
struct task_struct *me = current;
if (__thread_has_fpu(me)) {
- __save_init_fpu(me);
__thread_clear_has_fpu(me);
+ __save_init_fpu(me);
/* We do 'stts()' in __kernel_fpu_end() */
} else if (!use_eager_fpu()) {
this_cpu_write(fpu_owner_task, NULL);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index e4595f10591..84b778962c6 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -165,10 +165,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
u64 arch_irq_stat(void)
{
u64 sum = atomic_read(&irq_err_count);
-
-#ifdef CONFIG_X86_IO_APIC
- sum += atomic_read(&irq_mis_count);
-#endif
return sum;
}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac..2e9e12871c2 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
#endif
#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
* Save this mc into mc_saved_data. So it will be loaded early when a CPU is
* hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
* Hold hotplug lock so mc_saved_data is not accessed by a CPU in
* hotplug.
*/
- cpu_hotplug_driver_lock();
+ mutex_lock(&x86_cpu_microcode_mutex);
mc_saved_count_init = mc_saved_data.mc_saved_count;
mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
}
out:
- cpu_hotplug_driver_unlock();
+ mutex_unlock(&x86_cpu_microcode_mutex);
return ret;
}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 1cf5766dde1..3dbdd9c839d 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -33,6 +33,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
@@ -48,7 +49,6 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
-#include <asm/syscalls.h>
/*
* Known problems:
@@ -202,17 +202,16 @@ out:
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
* return to 32 bit user space.
*/
- struct task_struct *tsk;
+ struct task_struct *tsk = current;
int tmp, ret = -EPERM;
- tsk = current;
if (tsk->thread.saved_sp0)
goto out;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
@@ -227,11 +226,12 @@ int sys_vm86old(struct vm86_struct __user *v86)
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
+ asmlinkage_protect(1, ret, v86);
return ret;
}
-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
@@ -278,6 +278,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
+ asmlinkage_protect(2, ret, cmd, arg);
return ret;
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a335cc6cde7..698eecee918 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -60,6 +60,7 @@
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
+#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
@@ -99,6 +100,7 @@
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
+#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
@@ -532,6 +534,9 @@ FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
do { \
unsigned long _tmp; \
@@ -1234,9 +1239,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3) {
+ int highbyte_regs = ctxt->rex_prefix == 0;
+
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
- op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
+ op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+ highbyte_regs && (ctxt->d & ByteOp));
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
@@ -2986,6 +2994,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+ u8 al, ah;
+
+ if (ctxt->src.val == 0)
+ return emulate_de(ctxt);
+
+ al = ctxt->dst.val & 0xff;
+ ah = al / ctxt->src.val;
+ al %= ctxt->src.val;
+
+ ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+ /* Set PF, ZF, SF */
+ ctxt->src.type = OP_IMM;
+ ctxt->src.val = 0;
+ ctxt->src.bytes = 1;
+ fastop(ctxt, em_or);
+
+ return X86EMUL_CONTINUE;
+}
+
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
@@ -3926,7 +3956,10 @@ static const struct opcode opcode_table[256] = {
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
- N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+ I(DstAcc | SrcImmUByte | No64, em_aam),
+ I(DstAcc | SrcImmUByte | No64, em_aad),
+ F(DstAcc | ByteOp | No64, em_salc),
+ I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
@@ -3957,7 +3990,8 @@ static const struct opcode twobyte_table[256] = {
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM), N, N,
/* 0x10 - 0x1F */
- N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N, N,
+ D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
@@ -4162,6 +4196,10 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
break;
case OpMem8:
ctxt->memop.bytes = 1;
+ if (ctxt->memop.type == OP_REG) {
+ ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
+ fetch_register_operand(&ctxt->memop);
+ }
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
@@ -4184,6 +4222,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->val = 0;
op->count = 1;
break;
+ case OpXLat:
+ op->type = OP_MEM;
+ op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ op->addr.mem.ea =
+ register_address(ctxt,
+ reg_read(ctxt, VCPU_REGS_RBX) +
+ (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+ op->addr.mem.seg = seg_override(ctxt);
+ op->val = 0;
+ break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
@@ -4781,6 +4829,7 @@ twobyte_insn:
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
+ case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
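
Two of the additions above are worth pinning down with concrete values: em_aam() implements AAM's divide of AL by the immediate (raising #DE on a zero immediate, as real hardware does), and the new OpXLat operand computes the XLAT source as [RBX + zero-extended AL]. A standalone model of both (mirrors em_aam and the OpXLat address computation; the fastop-based flag update is elided):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of em_aam(): returns -1 for the #DE case (zero immediate). */
    static int aam(uint32_t *eax, uint8_t imm)
    {
        uint8_t al, ah;

        if (imm == 0)
            return -1;  /* emulate_de(ctxt) in the patch */

        al = *eax & 0xff;
        ah = al / imm;
        al %= imm;

        *eax = (*eax & 0xffff0000) | al | ((uint32_t)ah << 8);
        return 0;
    }

    /* Model of the OpXLat effective address: RBX + zero-extended AL. */
    static uint64_t xlat_ea(uint64_t rbx, uint64_t rax)
    {
        return rbx + (rax & 0xff);
    }

    int main(void)
    {
        uint32_t eax = 53;  /* AL = 53 */

        assert(aam(&eax, 10) == 0);
        assert((eax & 0xff) == 3);         /* AL = 53 % 10 */
        assert(((eax >> 8) & 0xff) == 5);  /* AH = 53 / 10 */
        assert(aam(&eax, 0) == -1);        /* AAM $0 raises #DE */
        assert(xlat_ea(0x1000, 0xabcd) == 0x10cd);
        printf("AAM: AH=%u AL=%u\n", (eax >> 8) & 0xff, eax & 0xff);
        return 0;
    }
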
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6667042714c..0af18077012 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5197,6 +5197,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
return 0;
}
+ if (vcpu->arch.halt_request) {
+ vcpu->arch.halt_request = 0;
+ ret = kvm_emulate_halt(vcpu);
+ goto out;
+ }
+
if (signal_pending(current))
goto out;
if (need_resched())
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 901177d75ff..1dc3d7c86b8 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -617,7 +617,9 @@ int pcibios_add_device(struct pci_dev *dev)
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- data = phys_to_virt(pa_data);
+ data = ioremap(pa_data, sizeof(*rom));
+ if (!data)
+ return -ENOMEM;
if (data->type == SETUP_PCI) {
rom = (struct pci_setup_rom *)data;
@@ -634,6 +636,7 @@ int pcibios_add_device(struct pci_dev *dev)
}
}
pa_data = data->next;
+ iounmap(data);
}
return 0;
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3..cf95e192d05 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -156,6 +156,21 @@ static void xen_vcpu_setup(int cpu)
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+ /*
+ * This path is called twice on PVHVM - first during bootup via
+ * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
+ * hotplugged: cpu_up -> xen_hvm_cpu_notify.
+	 * As we can only do the VCPUOP_register_vcpu_info once, let's
+	 * not overwrite its result.
+ *
+ * For PV it is called during restore (xen_vcpu_restore) and bootup
+ * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
+ * use this function.
+ */
+ if (xen_hvm_domain()) {
+ if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
+ return;
+ }
if (cpu < MAX_VIRT_CPUS)
per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
@@ -1589,8 +1604,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
xen_vcpu_setup(cpu);
- if (xen_have_vector_callback)
+ if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu);
+ if (xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+ }
break;
default:
break;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02..96c4e853492 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
+#include <linux/tick.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -436,6 +437,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
+ /*
+ * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+ * clears certain data that the cpu_idle loop (which called us
+ * and that we return from) expects. The only way to get that
+ * data back is to call:
+ */
+ tick_nohz_idle_enter();
}
#else /* !CONFIG_HOTPLUG_CPU */
@@ -662,6 +670,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
native_cpu_die(cpu);
}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a952250..054cc01bb84 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -497,7 +497,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
{
int cpu = smp_processor_id();
xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
+ /*
+	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
+	 * we do it in xen_hvm_cpu_notify (which gets called by smp_init
+	 * during early bootup and also during CPU hotplug events).
+ */
xen_setup_cpu_clockevents();
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b2b9837f9dd..e8918ffaf96 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q,
if (!new_blkg)
return -ENOMEM;
- preloaded = !radix_tree_preload(GFP_KERNEL);
-
blk_queue_bypass_start(q);
+ preloaded = !radix_tree_preload(GFP_KERNEL);
+
/*
* Make sure the root blkg exists and count the existing blkgs. As
* @q is bypassing at this point, blkg_lookup_create() can't be
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 474fcfeba66..eb8278a6419 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += nvs.o
# Power management related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_PM) += device_pm.o
+acpi-y += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ec7f5690031..c84ee956fa4 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -720,7 +720,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if ((obj_desc->common_field.start_field_bit_offset == 0) &&
(obj_desc->common_field.bit_length == access_bit_width)) {
- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+ if (buffer_length >= sizeof(u64)) {
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, buffer,
+ ACPI_READ);
+ } else {
+ /* Use raw_datum (u64) to handle buffers < 64 bits */
+
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
+ ACPI_READ);
+ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+ }
+
return_ACPI_STATUS(status);
}
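
The exfldio.c change guards against a caller buffer shorter than the 64-bit datum acpi_ex_field_datum_io() stores: rather than writing eight bytes through the caller's pointer, the read is bounced through a local u64 and only buffer_length bytes are copied out. The pattern in isolation (a generic C sketch of the bounce-buffer idiom, with a stand-in for the datum read):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for acpi_ex_field_datum_io() storing a full 64-bit datum. */
    static void read_field_datum(uint64_t *out)
    {
        *out = 0x1122334455667788ull;
    }

    int main(void)
    {
        uint8_t buffer[3];  /* caller buffer smaller than a u64 */
        uint64_t raw_datum;

        /* Old path: read_field_datum((uint64_t *)buffer) would overflow.
         * New path: bounce through raw_datum, copy only what fits. */
        read_field_datum(&raw_datum);
        memcpy(buffer, &raw_datum, sizeof(buffer));

        printf("%02x %02x %02x\n", buffer[0], buffer[1], buffer[2]);
        return 0;
    }
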
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dd314ef9bff..391010afe8f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -37,68 +37,6 @@
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("device_pm");
-static DEFINE_MUTEX(acpi_pm_notifier_lock);
-
-/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
- *
- * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
- * PM wakeup events. For example, wakeup events may be generated for bridges
- * if one of the devices below the bridge is signaling wakeup, even if the
- * bridge itself doesn't have a wakeup GPE associated with it.
- */
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler, void *context)
-{
- acpi_status status = AE_ALREADY_EXISTS;
-
- mutex_lock(&acpi_pm_notifier_lock);
-
- if (adev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_install_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
- if (ACPI_FAILURE(status))
- goto out;
-
- adev->wakeup.flags.notifier_present = true;
-
- out:
- mutex_unlock(&acpi_pm_notifier_lock);
- return status;
-}
-
-/**
- * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @adev: ACPI device to remove the notifier from.
- */
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler)
-{
- acpi_status status = AE_BAD_PARAMETER;
-
- mutex_lock(&acpi_pm_notifier_lock);
-
- if (!adev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_remove_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler);
- if (ACPI_FAILURE(status))
- goto out;
-
- adev->wakeup.flags.notifier_present = false;
-
- out:
- mutex_unlock(&acpi_pm_notifier_lock);
- return status;
-}
-
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
@@ -331,11 +269,13 @@ int acpi_bus_init_power(struct acpi_device *device)
if (result)
return result;
} else if (state == ACPI_STATE_UNKNOWN) {
- /* No power resources and missing _PSC? Try to force D0. */
+ /*
+ * No power resources and missing _PSC? Cross fingers and make
+ * it D0 in hope that this is what the BIOS put the device into.
+ * [We tried to force D0 here by executing _PS0, but that broke
+ * Toshiba P870-303 in a nasty way.]
+ */
state = ACPI_STATE_D0;
- result = acpi_dev_pm_explicit_set(device, state);
- if (result)
- return result;
}
device->power.state = state;
return 0;
@@ -376,6 +316,69 @@ bool acpi_bus_power_manageable(acpi_handle handle)
}
EXPORT_SYMBOL(acpi_bus_power_manageable);
+#ifdef CONFIG_PM
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events. For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler, void *context)
+{
+ acpi_status status = AE_ALREADY_EXISTS;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler, context);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = true;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (!adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_remove_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = false;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
bool acpi_bus_can_wakeup(acpi_handle handle)
{
struct acpi_device *device;
@@ -1014,3 +1017,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
mutex_unlock(&adev->physical_node_lock);
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
+#endif /* CONFIG_PM */
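
Both notifier helpers moved under CONFIG_PM above follow the same flag-under-mutex shape: a boolean records whether the handler is installed, and the mutex makes the check-and-set atomic so a second registration fails cleanly (AE_ALREADY_EXISTS) instead of installing twice. The generic shape of the idiom, with do_register() as a placeholder:

static DEFINE_MUTEX(example_lock);
static bool example_registered;

static int example_register(void)
{
	int err = -EEXIST;

	mutex_lock(&example_lock);
	if (!example_registered) {
		err = do_register();	/* placeholder for the real install */
		if (!err)
			example_registered = true;
	}
	mutex_unlock(&example_lock);
	return err;
}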
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d45b2871d33..edc00818c80 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
static int ec_poll(struct acpi_ec *ec)
{
unsigned long flags;
- int repeat = 2; /* number of command restarts */
+ int repeat = 5; /* number of command restarts */
while (repeat--) {
unsigned long delay = jiffies +
msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
- break;
pr_debug(PREFIX "controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 586e7e993d3..bcb7a3b905b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -641,7 +641,7 @@ void __init acpi_initrd_override(void *data, size_t size)
* Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
* works fine.
*/
- memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size);
+ memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
p = early_ioremap(acpi_tables_addr, all_tables_size);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 6ae5e440436..4241b8d844e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -201,8 +201,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
*control &= OSC_PCI_CONTROL_MASKS;
capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
} else {
- /* Run _OSC query for all possible controls. */
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ /* Run _OSC query only with existing controls. */
+ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
}
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8470771e5ea..a33821ca389 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -723,9 +723,19 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
return -EINVAL;
if (type == THERMAL_TRIP_ACTIVE) {
- /* aggressive active cooling */
- *trend = THERMAL_TREND_RAISING;
- return 0;
+ unsigned long trip_temp;
+ unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature,
+ tz->kelvin_offset);
+ if (thermal_get_trip_temp(thermal, trip, &trip_temp))
+ return -EINVAL;
+
+ if (temp > trip_temp) {
+ *trend = THERMAL_TREND_RAISING;
+ return 0;
+ } else {
+ /* Fall back on default trend */
+ return -EINVAL;
+ }
}
/*
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 313f959413d..81a9335092f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -447,6 +447,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion g6 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion m4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+ },
+ },
{}
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4ac2593234e..abcae6925ce 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -164,6 +164,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Asus UL30A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+ },
+ },
{ },
};
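
Both quirk additions rely on the standard DMI table mechanism: dmi_check_system() walks the table and invokes the callback of every entry whose .matches strings are all substrings of the firmware-provided DMI fields. A hedged sketch of a minimal entry (the vendor and product strings here are invented):

static int example_quirk(const struct dmi_system_id *d)
{
	pr_info("applying quirk for %s\n", d->ident);
	return 1;
}

static const struct dmi_system_id example_table[] = {
	{
		.callback = example_quirk,
		.ident = "Example Notebook",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000"),
		},
	},
	{ }	/* empty entry terminates the table */
};

/* somewhere in init code: dmi_check_system(example_table); */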
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 2f48123d74c..93cb09297c2 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -151,6 +151,7 @@ enum piix_controller_ids {
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
ich8_2port_sata_snb,
+ ich8_2port_sata_byt,
};
struct piix_map_db {
@@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
{ } /* terminate list */
};
@@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
[ich8_2port_sata_snb] = &ich8_2port_map_db,
+ [ich8_2port_sata_byt] = &ich8_2port_map_db,
};
static struct pci_bits piix_enable_bits[] = {
@@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
+
+ [ich8_2port_sata_byt] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
};
#define AHCI_PCI_BAR 5
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8a52dab412e..3badf1887df 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -61,7 +61,8 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
if (ap->flags & ATA_FLAG_ACPI_SATA)
return NULL;
- return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
+ return ap->scsi_host ?
+ DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev) : NULL;
}
EXPORT_SYMBOL(ata_ap_acpi_handle);
@@ -240,28 +241,15 @@ void ata_acpi_dissociate(struct ata_host *host)
}
}
-/**
- * ata_acpi_gtm - execute _GTM
- * @ap: target ATA port
- * @gtm: out parameter for _GTM result
- *
- * Evaluate _GTM and store the result in @gtm.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
- */
-int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle,
+ struct ata_acpi_gtm *gtm)
{
struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
union acpi_object *out_obj;
acpi_status status;
int rc = 0;
- status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_GTM", NULL,
- &output);
+ status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
rc = -ENOENT;
if (status == AE_NOT_FOUND)
@@ -295,6 +283,27 @@ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
return rc;
}
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+{
+ if (ata_ap_acpi_handle(ap))
+ return __ata_acpi_gtm(ap, ata_ap_acpi_handle(ap), gtm);
+ else
+ return -EINVAL;
+}
+
EXPORT_SYMBOL_GPL(ata_acpi_gtm);
/**
@@ -1080,7 +1089,7 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
if (!*handle)
return -ENODEV;
- if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+ if (__ata_acpi_gtm(ap, *handle, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
return 0;
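
The refactored helper keeps the usual ACPICA calling convention: with ACPI_ALLOCATE_BUFFER the interpreter allocates the output buffer and the caller owns (and must free) the result. In outline:

	struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
	if (ACPI_SUCCESS(status)) {
		obj = output.pointer;
		/* ... validate obj->type / obj->buffer.length ... */
		kfree(output.pointer);	/* caller frees the ACPICA-allocated buffer */
	}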
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 63c743baf92..cf15aee0cf7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+
+ /* some SATA bridges need us to indicate data xfer direction */
+ if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
+ dma_dir == DMA_FROM_DEVICE)
+ qc->tf.feature |= ATAPI_DMADIR;
+
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 5dba77ccaa0..b1a664a8f55 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -251,7 +251,7 @@ static const struct ata_port_info ahci_highbank_port_info = {
};
static struct scsi_host_template ahci_highbank_platform_sht = {
- AHCI_SHT("highbank-ahci"),
+ AHCI_SHT("sata_highbank"),
};
static const struct of_device_id ahci_of_match[] = {
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index caf33f620c3..d7b77e02b10 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -548,6 +548,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
/* start host DMA transaction */
dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl &= ~ATAPI_CONTROL1_STOP;
dmactl |= ATAPI_CONTROL1_START;
iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
}
@@ -617,17 +618,16 @@ static struct ata_port_operations sata_rcar_port_ops = {
.bmdma_status = sata_rcar_bmdma_status,
};
-static int sata_rcar_serr_interrupt(struct ata_port *ap)
+static void sata_rcar_serr_interrupt(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
- int handled = 0;
u32 serror;
serror = ioread32(priv->base + SCRSERR_REG);
if (!serror)
- return 0;
+ return;
DPRINTK("SError @host_intr: 0x%x\n", serror);
@@ -640,7 +640,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
ata_ehi_push_desc(ehi, "%s", "hotplug");
freeze = serror & SERR_COMM_WAKE ? 0 : 1;
- handled = 1;
}
/* freeze or abort */
@@ -648,11 +647,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
ata_port_freeze(ap);
else
ata_port_abort(ap);
-
- return handled;
}
-static int sata_rcar_ata_interrupt(struct ata_port *ap)
+static void sata_rcar_ata_interrupt(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
int handled = 0;
@@ -661,7 +658,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap)
if (qc)
handled |= ata_bmdma_port_intr(ap, qc);
- return handled;
+ /* be sure to clear ATA interrupt */
+ if (!handled)
+ sata_rcar_check_status(ap);
}
static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
@@ -676,20 +675,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+ sataintstat &= SATA_RCAR_INT_MASK;
if (!sataintstat)
goto done;
/* ack */
- iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
- priv->base + SATAINTSTAT_REG);
+ iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG);
ap = host->ports[0];
if (sataintstat & SATAINTSTAT_ATA)
- handled |= sata_rcar_ata_interrupt(ap);
+ sata_rcar_ata_interrupt(ap);
if (sataintstat & SATAINTSTAT_SERR)
- handled |= sata_rcar_serr_interrupt(ap);
+ sata_rcar_serr_interrupt(ap);
+ handled = 1;
done:
spin_unlock_irqrestore(&host->lock, flags);
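
The interrupt rework treats SATAINTSTAT as a write-zero-to-clear register (inferred from the patch): pending bits are acknowledged by writing their complement, restricted to the eleven implemented bits (0x7ff), and the raw status is masked with SATA_RCAR_INT_MASK before testing so reserved bits never fool the handler. The ack in isolation:

	u32 stat = ioread32(base + SATAINTSTAT_REG) & SATA_RCAR_INT_MASK;

	if (stat)	/* write 0 to the pending bits, 1 to the rest */
		iowrite32(~stat & 0x7ff, base + SATAINTSTAT_REG);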
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 531ceb31d0f..4e8213aa02f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
spin_lock(&brd->brd_lock);
idx = sector >> PAGE_SECTORS_SHIFT;
+ page->index = idx;
if (radix_tree_insert(&brd->brd_pages, idx, page)) {
__free_page(page);
page = radix_tree_lookup(&brd->brd_pages, idx);
BUG_ON(!page);
BUG_ON(page->index != idx);
- } else
- page->index = idx;
+ }
spin_unlock(&brd->brd_lock);
radix_tree_preload_end();
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e98da675f0c..54d03d4ab50 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2795,6 +2795,7 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ kfree(ldev->disk_conf);
kfree(ldev);
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index a9eccfc6079..2f5fffd976c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2661,7 +2661,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
if (hg == -1 && mdev->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
- drbd_set_role(mdev, R_SECONDARY, 0);
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
@@ -4659,8 +4658,8 @@ static int drbd_do_features(struct drbd_tconn *tconn)
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
- dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
- dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+ conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
+ conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0f51ed687dc..cd4ac9f001f 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -19,4 +19,9 @@ config OMAP_INTERCONNECT
help
Driver to enable OMAP interconnect error handling driver.
+
+config ARM_CCI
+ bool "ARM CCI driver support"
+ depends on ARM
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 45d997c8545..55aac809e5b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
# Interconnect bus driver for OMAP SoCs.
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
new file mode 100644
index 00000000000..eee1c5722fd
--- /dev/null
+++ b/drivers/bus/arm-cci.c
@@ -0,0 +1,510 @@
+/*
+ * ARM Cache Coherency Interconnect (CCI400) support
+ *
+ * Copyright (C) 2012-2013 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/arm-cci.h>
+
+#include <asm/cacheflush.h>
+#include <asm/memory.h>
+#include <asm/outercache.h>
+
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#define CCI_STATUS_OFFSET 0xc
+#define STATUS_CHANGE_PENDING (1 << 0)
+
+#define CCI400_PMCR 0x0100
+
+#define CCI_SLAVE_OFFSET(n) (0x1000 + 0x1000 * (n))
+#define CCI400_EAG_OFFSET CCI_SLAVE_OFFSET(3)
+#define CCI400_KF_OFFSET CCI_SLAVE_OFFSET(4)
+
+#define DRIVER_NAME "CCI"
+struct cci_drvdata {
+ void __iomem *baseaddr;
+};
+
+static struct cci_drvdata *info;
+
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI400_PMU_CYCLE_CNTR_BASE 0x9000
+#define CCI400_PMU_CNTR_BASE(idx) (CCI400_PMU_CYCLE_CNTR_BASE + (idx) * 0x1000)
+
+#define CCI400_PMCR_CEN 0x00000001
+#define CCI400_PMCR_RST 0x00000002
+#define CCI400_PMCR_CCR 0x00000004
+#define CCI400_PMCR_CCD 0x00000008
+#define CCI400_PMCR_EX 0x00000010
+#define CCI400_PMCR_DP 0x00000020
+#define CCI400_PMCR_NCNT_MASK 0x0000F800
+#define CCI400_PMCR_NCNT_SHIFT 11
+
+#define CCI400_PMU_EVT_SEL 0x000
+#define CCI400_PMU_CNTR 0x004
+#define CCI400_PMU_CNTR_CTRL 0x008
+#define CCI400_PMU_OVERFLOW 0x00C
+
+#define CCI400_PMU_OVERFLOW_FLAG 1
+
+enum cci400_perf_events {
+ CCI400_PMU_CYCLES = 0xFF
+};
+
+#define CCI400_PMU_EVENT_MASK 0xff
+#define CCI400_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
+#define CCI400_PMU_EVENT_CODE(event) (event & 0x1f)
+
+#define CCI400_PMU_EVENT_SOURCE_S0 0
+#define CCI400_PMU_EVENT_SOURCE_S4 4
+#define CCI400_PMU_EVENT_SOURCE_M0 5
+#define CCI400_PMU_EVENT_SOURCE_M2 7
+
+#define CCI400_PMU_EVENT_SLAVE_MIN 0x0
+#define CCI400_PMU_EVENT_SLAVE_MAX 0x13
+
+#define CCI400_PMU_EVENT_MASTER_MIN 0x14
+#define CCI400_PMU_EVENT_MASTER_MAX 0x1A
+
+#define CCI400_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI400_PMU_CYCLE_COUNTER_IDX 0
+#define CCI400_PMU_COUNTER0_IDX 1
+#define CCI400_PMU_COUNTER_LAST(cci_pmu) (CCI400_PMU_CYCLE_COUNTER_IDX + cci_pmu->num_events - 1)
+
+
+static struct perf_event *events[CCI400_PMU_MAX_HW_EVENTS];
+static unsigned long used_mask[BITS_TO_LONGS(CCI400_PMU_MAX_HW_EVENTS)];
+static struct pmu_hw_events cci_hw_events = {
+ .events = events,
+ .used_mask = used_mask,
+};
+
+static int cci_pmu_validate_hw_event(u8 hw_event)
+{
+ u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
+ u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
+
+ if (ev_source <= CCI400_PMU_EVENT_SOURCE_S4 &&
+ ev_code <= CCI400_PMU_EVENT_SLAVE_MAX)
+ return hw_event;
+ else if (CCI400_PMU_EVENT_SOURCE_M0 <= ev_source &&
+ ev_source <= CCI400_PMU_EVENT_SOURCE_M2 &&
+ CCI400_PMU_EVENT_MASTER_MIN <= ev_code &&
+ ev_code <= CCI400_PMU_EVENT_MASTER_MAX)
+ return hw_event;
+
+ return -EINVAL;
+}
+
+static inline int cci_pmu_counter_is_valid(struct arm_pmu *cci_pmu, int idx)
+{
+ return CCI400_PMU_CYCLE_COUNTER_IDX <= idx &&
+ idx <= CCI400_PMU_COUNTER_LAST(cci_pmu);
+}
+
+static inline u32 cci_pmu_read_register(int idx, unsigned int offset)
+{
+ return readl_relaxed(info->baseaddr + CCI400_PMU_CNTR_BASE(idx) + offset);
+}
+
+static inline void cci_pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+ return writel_relaxed(value, info->baseaddr + CCI400_PMU_CNTR_BASE(idx) + offset);
+}
+
+static inline void cci_pmu_disable_counter(int idx)
+{
+ cci_pmu_write_register(0, idx, CCI400_PMU_CNTR_CTRL);
+}
+
+static inline void cci_pmu_enable_counter(int idx)
+{
+ cci_pmu_write_register(1, idx, CCI400_PMU_CNTR_CTRL);
+}
+
+static inline void cci_pmu_select_event(int idx, unsigned long event)
+{
+ event &= CCI400_PMU_EVENT_MASK;
+ cci_pmu_write_register(event, idx, CCI400_PMU_EVT_SEL);
+}
+
+static u32 cci_pmu_get_max_counters(void)
+{
+ u32 n_cnts = (readl_relaxed(info->baseaddr + CCI400_PMCR) &
+ CCI400_PMCR_NCNT_MASK) >> CCI400_PMCR_NCNT_SHIFT;
+
+ /* add 1 for cycle counter */
+ return n_cnts + 1;
+}
+
+static struct pmu_hw_events *cci_pmu_get_hw_events(
+ struct arm_pmu *__always_unused pmu)
+{
+ return &cci_hw_events;
+}
+
+static int cci_pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_event = &event->hw;
+ unsigned long cci_event = hw_event->config_base & CCI400_PMU_EVENT_MASK;
+ int idx;
+
+ if (cci_event == CCI400_PMU_CYCLES) {
+ if (test_and_set_bit(CCI400_PMU_CYCLE_COUNTER_IDX, hw->used_mask))
+ return -EAGAIN;
+
+ return CCI400_PMU_CYCLE_COUNTER_IDX;
+ }
+
+ for (idx = CCI400_PMU_COUNTER0_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); ++idx) {
+ if (!test_and_set_bit(idx, hw->used_mask))
+ return idx;
+ }
+
+ /* No counters available */
+ return -EAGAIN;
+}
+
+static int cci_pmu_map_event(struct perf_event *event)
+{
+ int mapping;
+ u8 config = event->attr.config & CCI400_PMU_EVENT_MASK;
+
+ if (event->attr.type < PERF_TYPE_MAX)
+ return -ENOENT;
+
+ /* 0xff is used to represent CCI Cycles */
+ if (config == 0xff)
+ mapping = config;
+ else
+ mapping = cci_pmu_validate_hw_event(config);
+
+ return mapping;
+}
+
+static int cci_pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+ int irq, err, i = 0;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ if (unlikely(!pmu_device))
+ return -ENODEV;
+
+ /* CCI exports 6 interrupts - 1 nERRORIRQ + 5 nEVNTCNTOVERFLOW (PMU)
+ nERRORIRQ will be handled by secure firmware on TC2. So we
+ assume that all CCI interrupts listed in the linux device
+ tree are PMU interrupts.
+
+ The following code should then be able to handle different routing
+ of the CCI PMU interrupts.
+ */
+ while ((irq = platform_get_irq(pmu_device, i)) > 0) {
+ err = request_irq(irq, handler, 0, "arm-cci-pmu", cci_pmu);
+ if (err) {
+ dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+ irq);
+ return err;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static irqreturn_t cci_pmu_handle_irq(int irq_num, void *dev)
+{
+ struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ int idx;
+
+ regs = get_irq_regs();
+
+ /* Iterate over counters and update the corresponding perf events.
+ This should work regardless of whether we have per-counter overflow
+ interrupt or a combined overflow interrupt. */
+ for (idx = CCI400_PMU_CYCLE_COUNTER_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); idx++) {
+ struct perf_event *event = events->events[idx];
+ struct hw_perf_event *hw_counter;
+
+ if (!event)
+ continue;
+
+ hw_counter = &event->hw;
+
+ /* Did this counter overflow? */
+ if (!(cci_pmu_read_register(idx, CCI400_PMU_OVERFLOW) & CCI400_PMU_OVERFLOW_FLAG))
+ continue;
+ cci_pmu_write_register(CCI400_PMU_OVERFLOW_FLAG, idx, CCI400_PMU_OVERFLOW);
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hw_counter->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cci_pmu->disable(event);
+ }
+
+ irq_work_run();
+ return IRQ_HANDLED;
+}
+
+static void cci_pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+ int irq, i = 0;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ while ((irq = platform_get_irq(pmu_device, i)) > 0) {
+ free_irq(irq, cci_pmu);
+ i++;
+ }
+}
+
+static void cci_pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Configure the event to count, unless you are counting cycles */
+ if (idx != CCI400_PMU_CYCLE_COUNTER_IDX)
+ cci_pmu_select_event(idx, hw_counter->config_base);
+
+ cci_pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_disable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ cci_pmu_disable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_start(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Enable all the PMU counters. */
+ val = readl(info->baseaddr + CCI400_PMCR) | CCI400_PMCR_CEN;
+ writel(val, info->baseaddr + CCI400_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_stop(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events(cci_pmu);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable all the PMU counters. */
+ val = readl(info->baseaddr + CCI400_PMCR) & ~CCI400_PMCR_CEN;
+ writel(val, info->baseaddr + CCI400_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 cci_pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+ u32 value;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return 0;
+ }
+ value = cci_pmu_read_register(idx, CCI400_PMU_CNTR);
+
+ return value;
+}
+
+static void cci_pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx)))
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ else
+ cci_pmu_write_register(value, idx, CCI400_PMU_CNTR);
+}
+
+static struct arm_pmu cci_pmu = {
+ .name = DRIVER_NAME,
+ .max_period = (1LLU << 32) - 1,
+ .get_hw_events = cci_pmu_get_hw_events,
+ .get_event_idx = cci_pmu_get_event_idx,
+ .map_event = cci_pmu_map_event,
+ .request_irq = cci_pmu_request_irq,
+ .handle_irq = cci_pmu_handle_irq,
+ .free_irq = cci_pmu_free_irq,
+ .enable = cci_pmu_enable_event,
+ .disable = cci_pmu_disable_event,
+ .start = cci_pmu_start,
+ .stop = cci_pmu_stop,
+ .read_counter = cci_pmu_read_counter,
+ .write_counter = cci_pmu_write_counter,
+};
+
+static int cci_pmu_init(struct platform_device *pdev)
+{
+ cci_pmu.plat_device = pdev;
+ cci_pmu.num_events = cci_pmu_get_max_counters();
+ raw_spin_lock_init(&cci_hw_events.pmu_lock);
+ cpumask_setall(&cci_pmu.valid_cpus);
+
+ return armpmu_register(&cci_pmu, -1);
+}
+
+#else
+
+static int cci_pmu_init(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
+void notrace disable_cci(int cluster)
+{
+ u32 slave_reg = cluster ? CCI400_KF_OFFSET : CCI400_EAG_OFFSET;
+ writel_relaxed(0x0, info->baseaddr + slave_reg);
+
+ while (readl_relaxed(info->baseaddr + CCI_STATUS_OFFSET)
+ & STATUS_CHANGE_PENDING)
+ barrier();
+}
+EXPORT_SYMBOL_GPL(disable_cci);
+
+static int cci_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -EADDRNOTAVAIL;
+ goto ioremap_err;
+ }
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory:
+ */
+ __cpuc_flush_dcache_area(info, sizeof *info);
+ __cpuc_flush_dcache_area(&info, sizeof info);
+ outer_clean_range(virt_to_phys(info), virt_to_phys(info + 1));
+ outer_clean_range(virt_to_phys(&info), virt_to_phys(&info + 1));
+
+ platform_set_drvdata(pdev, info);
+
+ if (cci_pmu_init(pdev) < 0)
+ pr_info("CCI PMU initialisation failed.\n");
+
+ pr_info("CCI loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+	release_mem_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static const struct of_device_id arm_cci_matches[] = {
+ {.compatible = "arm,cci"},
+ {},
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_driver_probe,
+};
+
+static int __init cci_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+core_initcall(cci_init);
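
The PMU code above hands out counters from a shared used_mask bitmap; test_and_set_bit() makes the claim atomic, so two events racing for the same index cannot both win it. Reduced to its core (a sketch of the shape used by cci_pmu_get_event_idx()):

static int example_grab_counter(unsigned long *used_mask, int first, int last)
{
	int idx;

	for (idx = first; idx <= last; idx++)
		if (!test_and_set_bit(idx, used_mask))	/* atomic claim */
			return idx;

	return -EAGAIN;		/* every counter is busy */
}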
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index cdd4c09fda9..a22a7a50274 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -95,9 +95,9 @@ struct si_sm_data {
enum bt_states state;
unsigned char seq; /* BT sequence number */
struct si_sm_io *io;
- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int write_count;
- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int read_count;
int truncated;
long timeout; /* microseconds countdown */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 9eb360ff8ca..d5a5f020810 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
return ipmi_ioctl(filep, cmd, arg);
}
}
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_mutex);
+ ret = compat_ipmi_ioctl(filep, cmd, arg);
+ mutex_unlock(&ipmi_mutex);
+
+ return ret;
+}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ipmi_ioctl,
+ .compat_ioctl = unlocked_compat_ipmi_ioctl,
#endif
.open = ipmi_open,
.release = ipmi_release,
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 32a6c576495..eccd7cc3b95 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
if (r->entropy_count / 8 < min + reserved) {
nbytes = 0;
} else {
+ int entropy_count, orig;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
/* If limited, never pull more than available */
- if (r->limit && nbytes + reserved >= r->entropy_count / 8)
- nbytes = r->entropy_count/8 - reserved;
-
- if (r->entropy_count / 8 >= nbytes + reserved)
- r->entropy_count -= nbytes*8;
- else
- r->entropy_count = reserved;
+ if (r->limit && nbytes + reserved >= entropy_count / 8)
+ nbytes = entropy_count/8 - reserved;
+
+ if (entropy_count / 8 >= nbytes + reserved) {
+ entropy_count -= nbytes*8;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ } else {
+ entropy_count = reserved;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ }
- if (r->entropy_count < random_write_wakeup_thresh)
+ if (entropy_count < random_write_wakeup_thresh)
wakeup_write = 1;
}
@@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled && !r->last_data_init)
- nbytes += EXTRACT_SIZE;
+ if (fips_enabled) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!r->last_data_init) {
+ r->last_data_init = true;
+ spin_unlock_irqrestore(&r->lock, flags);
+ trace_extract_entropy(r->name, EXTRACT_SIZE,
+ r->entropy_count, _RET_IP_);
+ xfer_secondary_pool(r, EXTRACT_SIZE);
+ extract_buf(r, tmp);
+ spin_lock_irqsave(&r->lock, flags);
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ }
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
@@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
extract_buf(r, tmp);
if (fips_enabled) {
- unsigned long flags;
-
-
- /* prime last_data value if need be, per fips 140-2 */
- if (!r->last_data_init) {
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- r->last_data_init = true;
- nbytes -= EXTRACT_SIZE;
- spin_unlock_irqrestore(&r->lock, flags);
- extract_buf(r, tmp);
- }
-
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
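
The account() rework replaces a plain read-modify-write with the classic cmpxchg retry loop: snapshot the counter, compute the new value from the snapshot, and retry if another CPU changed it in between. A userspace model of the same loop in C11 atomics (a model, not kernel code):

#include <stdatomic.h>

/* Take up to want_bits of credit, never going below zero, tolerating
 * concurrent updates from other threads. */
static int take_credit(atomic_int *pool_bits, int want_bits)
{
	int orig, new;

	do {
		orig = atomic_load(pool_bits);
		new = orig > want_bits ? orig - want_bits : 0;
	} while (!atomic_compare_exchange_weak(pool_bits, &orig, new));

	return orig - new;	/* credit actually taken */
}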
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 0d2e82f9557..7c3b3dcbfbc 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1337,7 +1337,7 @@ int tpm_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
- int rc;
+ int rc, try;
u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
@@ -1355,9 +1355,32 @@ int tpm_pm_suspend(struct device *dev)
}
/* now do the actual savestate */
- cmd.header.in = savestate_header;
- rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
- "sending savestate before suspend");
+ for (try = 0; try < TPM_RETRY; try++) {
+ cmd.header.in = savestate_header;
+ rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
+
+ /*
+ * If the TPM indicates that it is too busy to respond to
+ * this command then retry before giving up. It can take
+ * several seconds for this TPM to be ready.
+ *
+ * This can happen if the TPM has already been sent the
+ * SaveState command before the driver has loaded. TCG 1.2
+ * specification states that any communication after SaveState
+ * may cause the TPM to invalidate previously saved state.
+ */
+ if (rc != TPM_WARN_RETRY)
+ break;
+ msleep(TPM_TIMEOUT_RETRY);
+ }
+
+ if (rc)
+ dev_err(chip->dev,
+ "Error (%d) sending savestate before suspend\n", rc);
+ else if (try > 0)
+ dev_warn(chip->dev, "TPM savestate took %dms\n",
+ try * TPM_TIMEOUT_RETRY);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
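
The retry loop bounds the total wait at TPM_RETRY * TPM_TIMEOUT_RETRY = 50 * 100 ms = 5 s, which is where the "5 seconds" comment in tpm.h (below) comes from. Stripped of the TPM specifics, the loop is the standard bounded-retry-with-backoff shape (send_command() is a placeholder for transmit_cmd()):

	for (try = 0; try < TPM_RETRY; try++) {
		rc = send_command();		/* placeholder */
		if (rc != TPM_WARN_RETRY)
			break;			/* done: success or a hard error */
		msleep(TPM_TIMEOUT_RETRY);	/* wait 100 ms before retrying */
	}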
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 81b52015f66..0770d1d7936 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -32,10 +32,12 @@ enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 256,
+ TPM_RETRY = 50, /* 5 seconds */
};
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
+ TPM_TIMEOUT_RETRY = 100 /* msecs */
};
/* TPM addresses */
@@ -44,6 +46,7 @@ enum tpm_addr {
TPM_ADDR = 0x4E,
};
+#define TPM_WARN_RETRY 0x800
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index ec3b88fe3e6..d1359f44043 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o
-obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o
+obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o clk-vexpress-spc.o
diff --git a/drivers/clk/versatile/clk-vexpress-spc.c b/drivers/clk/versatile/clk-vexpress-spc.c
new file mode 100644
index 00000000000..f35b70a0e68
--- /dev/null
+++ b/drivers/clk/versatile/clk-vexpress-spc.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2012 Linaro
+ *
+ * Author: Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/* SPC clock programming interface for Vexpress cpus */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vexpress.h>
+
+struct clk_spc {
+ struct clk_hw hw;
+ spinlock_t *lock;
+ int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+ u32 freq;
+
+ if (vexpress_spc_get_performance(spc->cluster, &freq)) {
+		pr_err("%s: Failed\n", __func__);
+		return -EIO;
+ }
+
+ return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *parent_rate)
+{
+ return drate;
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return vexpress_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+ .recalc_rate = spc_recalc_rate,
+ .round_rate = spc_round_rate,
+ .set_rate = spc_set_rate,
+};
+
+struct clk *vexpress_clk_register_spc(const char *name, int cluster_id)
+{
+ struct clk_init_data init;
+ struct clk_spc *spc;
+ struct clk *clk;
+
+ if (!name) {
+		pr_err("Invalid name passed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+ if (!spc) {
+ pr_err("could not allocate spc clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spc->hw.init = &init;
+ spc->cluster = cluster_id;
+
+ init.name = name;
+ init.ops = &clk_spc_ops;
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+
+ clk = clk_register(NULL, &spc->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(spc);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+#if defined(CONFIG_OF)
+void __init vexpress_clk_of_register_spc(void)
+{
+ char name[9] = "clusterX";
+ struct device_node *node = NULL;
+ struct clk *clk;
+ const u32 *val;
+ int cluster_id = 0, len;
+
+ if (!of_find_compatible_node(NULL, NULL, "arm,spc")) {
+ pr_debug("%s: No SPC found, Exiting!!\n", __func__);
+ return;
+ }
+
+ while ((node = of_find_node_by_name(node, "cluster"))) {
+ val = of_get_property(node, "reg", &len);
+ if (val && len == 4)
+ cluster_id = be32_to_cpup(val);
+
+ name[7] = cluster_id + '0';
+ clk = vexpress_clk_register_spc(name, cluster_id);
+ if (IS_ERR(clk))
+ return;
+
+ pr_debug("Registered clock '%s'\n", name);
+ clk_register_clkdev(clk, name, NULL);
+ }
+}
+CLK_OF_DECLARE(spc, "arm,spc", vexpress_clk_of_register_spc);
+#endif
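
to_clk_spc() above is the standard container_of() trick: the clk core hands the driver a pointer to the embedded struct clk_hw, and the driver recovers its wrapper by subtracting the member offset. A standalone model (the struct definitions here are stand-ins, not the kernel's):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_hw { int placeholder; };
struct clk_spc { struct clk_hw hw; int cluster; };

static int spc_cluster_of(struct clk_hw *hw)
{
	struct clk_spc *spc = container_of(hw, struct clk_spc, hw);

	return spc->cluster;
}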
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 030ddf6dd3f..204812a5b92 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,27 @@
# ARM CPU Frequency scaling drivers
#
+config ARM_BL_CPUFREQ
+ depends on EXPERIMENTAL
+ depends on BL_SWITCHER
+ tristate "Simple cpufreq interface for the ARM big.LITTLE switcher"
+ help
+ Provides a simple cpufreq interface to control the ARM
+ big.LITTLE switcher.
+
+ Refer to Documentation/cpu-freq/cpufreq-arm-bl.txt for details.
+
+ If unsure, say N.
+
+config ARM_BL_CPUFREQ_TEST
+ depends on ARM_BL_CPUFREQ
+ bool "Unit testing on cpufreq interface for the ARM big.LITTLE switcher"
+ help
+	  Run unit tests on the cpufreq interface for the ARM big.LITTLE
+	  switcher before registering it.
+
+ If unsure, say N.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
@@ -113,3 +134,24 @@ config ARM_HIGHBANK_CPUFREQ
based boards.
If in doubt, say N.
+
+config ARM_BIG_LITTLE_CPUFREQ
+ tristate
+ depends on ARM_CPU_TOPOLOGY
+
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on OF && BIG_LITTLE
+ default y
+ help
+	  This enables the Generic CPUfreq driver for the ARM big.LITTLE platform.
+ This gets frequency tables from DT.
+
+config ARM_VEXPRESS_BL_CPUFREQ
+ tristate "ARM Vexpress big LITTLE CPUfreq driver"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on ARM_SPC && BIG_LITTLE
+ help
+	  This enables the CPUfreq driver for the ARM Vexpress big.LITTLE platform.
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b27cc1cc592..3f0443fcf24 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -45,7 +45,8 @@ obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_ARM_BL_CPUFREQ) += arm-bl-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
@@ -58,6 +59,11 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
+obj-$(CONFIG_ARM_VEXPRESS_BL_CPUFREQ) += vexpress_big_little.o
+#Keep DT_BL_CPUFREQ as the last entry in all big LITTLE drivers, so that it is
+#probed last.
+obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_dt_big_little.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 57a8774f0b4..bb5939bbef5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
diff --git a/drivers/cpufreq/arm-bl-cpufreq.c b/drivers/cpufreq/arm-bl-cpufreq.c
new file mode 100644
index 00000000000..bc633f2d7b0
--- /dev/null
+++ b/drivers/cpufreq/arm-bl-cpufreq.c
@@ -0,0 +1,270 @@
+/*
+ * arm-bl-cpufreq.c: Simple cpufreq backend for the ARM big.LITTLE switcher
+ * Copyright (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define MODULE_NAME "arm-bl-cpufreq"
+#define __module_pr_fmt(prefix, fmt) MODULE_NAME ": " prefix fmt
+#define pr_fmt(fmt) __module_pr_fmt("", fmt)
+
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+
+#include <asm/bL_switcher.h>
+
+#include "arm-bl-cpufreq.h"
+
+/*
+ * Include test prototypes and headers.
+ * This file is included a second time below, with ARM_BL_CPUFREQ_DEFINE_TESTS
+ * defined, to pull in the test function bodies.
+ */
+#include "arm-bl-cpufreq_tests.c"
+
+#define ARM_BL_CPUFREQ_DEFINE_TESTS
+#include "arm-bl-cpufreq_tests.c"
+
+/* Dummy frequencies representing the big and little clusters: */
+#define FREQ_BIG 1000000
+#define FREQ_LITTLE 100000
+
+/* Cluster numbers */
+#define CLUSTER_BIG 0
+#define CLUSTER_LITTLE 1
+
+/* Miscellaneous helpers */
+
+static unsigned int entry_to_freq(
+ struct cpufreq_frequency_table const *entry)
+{
+ return entry->frequency;
+}
+
+static unsigned int entry_to_cluster(
+ struct cpufreq_frequency_table const *entry)
+{
+ return entry->index;
+}
+
+static struct cpufreq_frequency_table const *find_entry_by_cluster(int cluster)
+{
+ unsigned int i;
+
+ for(i = 0; entry_to_freq(&bl_freqs[i]) != CPUFREQ_TABLE_END; i++)
+ if(entry_to_cluster(&bl_freqs[i]) == cluster)
+ return &bl_freqs[i];
+
+ WARN(1, pr_fmt("%s(): invalid cluster number %d, assuming 0\n"),
+ __func__, cluster);
+ return &bl_freqs[0];
+}
+
+static unsigned int cluster_to_freq(int cluster)
+{
+ return entry_to_freq(find_entry_by_cluster(cluster));
+}
+
+/*
+ * Functions to get the current status.
+ *
+ * Beware that the cluster for another CPU may change unexpectedly.
+ */
+
+static unsigned int get_local_cluster(void)
+{
+ unsigned int mpidr;
+ asm ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+ return MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
+static void __get_current_cluster(void *_data)
+{
+ unsigned int *_cluster = _data;
+ *_cluster = get_local_cluster();
+}
+
+static int get_current_cluster(unsigned int cpu)
+{
+ unsigned int cluster = 0;
+ smp_call_function_single(cpu, __get_current_cluster, &cluster, 1);
+ return cluster;
+}
+
+static int get_current_cached_cluster(unsigned int cpu)
+{
+ return per_cpu(cpu_cur_cluster, cpu);
+}
+
+static unsigned int get_current_freq(unsigned int cpu)
+{
+ return cluster_to_freq(get_current_cluster(cpu));
+}
+
+/*
+ * Switch to the requested cluster.
+ */
+static void switch_to_entry(unsigned int cpu,
+ struct cpufreq_frequency_table const *target)
+{
+ int old_cluster, new_cluster;
+ struct cpufreq_freqs freqs;
+
+ old_cluster = get_current_cached_cluster(cpu);
+ new_cluster = entry_to_cluster(target);
+
+ pr_debug("Switching to cluster %d on CPU %d\n", new_cluster, cpu);
+
+ if(new_cluster == old_cluster)
+ return;
+
+ freqs.cpu = cpu;
+ freqs.old = cluster_to_freq(old_cluster);
+ freqs.new = entry_to_freq(target);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ bL_switch_request(cpu, new_cluster);
+ per_cpu(cpu_cur_cluster, cpu) = new_cluster;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+}
+
+
+/* Cpufreq methods and module code */
+
+static int bl_cpufreq_init(struct cpufreq_policy *policy)
+{
+ unsigned int cluster, cpu = policy->cpu;
+ int err;
+
+ /*
+ * Set CPU and policy min and max frequencies based on bl_freqs:
+ */
+ err = cpufreq_frequency_table_cpuinfo(policy, bl_freqs);
+ if (err)
+ goto error;
+ /*
+ * Publish frequency table so that it is available to governors
+ * and sysfs:
+ */
+ cpufreq_frequency_table_get_attr(bl_freqs, policy->cpu);
+
+ cluster = get_current_cluster(cpu);
+ per_cpu(cpu_cur_cluster, cpu) = cluster;
+
+ /*
+ * Ideally, transition_latency should be calibrated here.
+ */
+ policy->cpuinfo.transition_latency = BL_CPUFREQ_FAKE_LATENCY;
+ policy->cur = cluster_to_freq(cluster);
+ policy->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+
+ pr_info("cpufreq initialised successfully\n");
+ return 0;
+
+error:
+ pr_warning("cpufreq initialisation failed (%d)\n", err);
+ return err;
+}
+
+static int bl_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, bl_freqs);
+}
+
+static int bl_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int err;
+ int index;
+
+ err = cpufreq_frequency_table_target(policy, bl_freqs, target_freq,
+ relation, &index);
+ if(err)
+ return err;
+
+ switch_to_entry(policy->cpu, &bl_freqs[index]);
+ return 0;
+}
+
+static unsigned int bl_cpufreq_get(unsigned int cpu)
+{
+ return get_current_freq(cpu);
+}
+
+static struct freq_attr *bl_cpufreq_attrs[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL
+};
+
+static struct cpufreq_driver __read_mostly bl_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .name = MODULE_NAME,
+
+ .init = bl_cpufreq_init,
+ .verify = bl_cpufreq_verify,
+ .target = bl_cpufreq_target,
+ .get = bl_cpufreq_get,
+ .attr = bl_cpufreq_attrs,
+ /* what else? */
+};
+
+static int __init bl_cpufreq_module_init(void)
+{
+ int err;
+
+ /* test_config :
+ * - 0: Do not run tests
+ * - 1: Run tests and then register cpufreq driver if tests passed
+ */
+ if ((test_config > 0) && (pre_init_tests() != 0))
+ return -EINVAL;
+
+ err = cpufreq_register_driver(&bl_cpufreq_driver);
+ if(err)
+ pr_info("cpufreq backend driver registration failed (%d)\n",
+ err);
+ else {
+ pr_info("cpufreq backend driver registered.\n");
+
+ if ((test_config > 0) && (post_init_tests() != 0)) {
+ cpufreq_unregister_driver(&bl_cpufreq_driver);
+ return -EINVAL;
+ }
+ }
+
+ return err;
+}
+module_init(bl_cpufreq_module_init);
+
+static void __exit bl_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&bl_cpufreq_driver);
+ pr_info("cpufreq backend driver unloaded.\n");
+}
+module_exit(bl_cpufreq_module_exit);
+
+
+MODULE_AUTHOR("Dave Martin");
+MODULE_DESCRIPTION("Simple cpufreq interface for the ARM big.LITTLE switcher");
+MODULE_LICENSE("GPL");
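
get_local_cluster() above reads MPIDR (CP15 c0, c0, 5) and extracts affinity level 1, which on these platforms is the cluster number. Each affinity level is an 8-bit field, so MPIDR_AFFINITY_LEVEL(mpidr, 1) reduces to a shift and mask (a sketch of the same read with the macro expanded):

static unsigned int example_local_cluster(void)
{
	unsigned int mpidr;

	/* read the Multiprocessor Affinity Register */
	asm("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));

	return (mpidr >> 8) & 0xff;	/* affinity level 1 = cluster id */
}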
diff --git a/drivers/cpufreq/arm-bl-cpufreq.h b/drivers/cpufreq/arm-bl-cpufreq.h
new file mode 100644
index 00000000000..b13bb8c543d
--- /dev/null
+++ b/drivers/cpufreq/arm-bl-cpufreq.h
@@ -0,0 +1,37 @@
+#ifndef ARM_BL_CPUFREQ_H
+#define ARM_BL_CPUFREQ_H
+
+/* Dummy frequencies representing the big and little clusters: */
+#define FREQ_BIG 1000000
+#define FREQ_LITTLE 100000
+
+/* Cluster numbers */
+#define CLUSTER_BIG 0
+#define CLUSTER_LITTLE 1
+
+/*
+ * Switch latency advertised to cpufreq. This value is bogus and will
+ * need to be properly calibrated when running on real hardware.
+ */
+#define BL_CPUFREQ_FAKE_LATENCY 1
+
+static struct cpufreq_frequency_table __read_mostly bl_freqs[] = {
+ { CLUSTER_BIG, FREQ_BIG },
+ { CLUSTER_LITTLE, FREQ_LITTLE },
+ { 0, CPUFREQ_TABLE_END },
+};
+
+/* Cached current cluster for each CPU to save on IPIs */
+static DEFINE_PER_CPU(unsigned int, cpu_cur_cluster);
+
+static unsigned int entry_to_freq(struct cpufreq_frequency_table const *entry);
+static unsigned int entry_to_cluster(
+ struct cpufreq_frequency_table const *entry);
+static struct cpufreq_frequency_table const *find_entry_by_cluster(int cluster);
+static unsigned int cluster_to_freq(int cluster);
+static int get_current_cluster(unsigned int cpu);
+static int get_current_cached_cluster(unsigned int cpu);
+static unsigned int get_current_freq(unsigned int cpu);
+static unsigned int bl_cpufreq_get(unsigned int cpu);
+
+#endif /* ! ARM_BL_CPUFREQ_H */
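
The driver pulls arm-bl-cpufreq_tests.c in twice on purpose: the first include (macro undefined) emits only prototypes and module parameters, while the second, after defining ARM_BL_CPUFREQ_DEFINE_TESTS, emits the function bodies. That keeps the tests in one file without a separate header. The shape of the trick, with illustrative file and symbol names:

/* tests.inc -- included twice by the consumer */
#ifndef DEFINE_TEST_BODIES
static int run_tests(void);		/* first pass: prototype only */
#else
static int run_tests(void)		/* second pass: definition */
{
	return 0;			/* real tests would go here */
}
#endif

/* consumer.c */
#include "tests.inc"
/* ... code that may call run_tests() ... */
#define DEFINE_TEST_BODIES
#include "tests.inc"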
diff --git a/drivers/cpufreq/arm-bl-cpufreq_tests.c b/drivers/cpufreq/arm-bl-cpufreq_tests.c
new file mode 100644
index 00000000000..da349e165f4
--- /dev/null
+++ b/drivers/cpufreq/arm-bl-cpufreq_tests.c
@@ -0,0 +1,652 @@
+/*
+ * arm-bl-cpufreq_tests.c: Unit tests on the simple cpufreq backend for the
+ * ARM big.LITTLE switcher
+ * Copyright (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef ARM_BL_CPUFREQ_DEFINE_TESTS
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "arm-bl-cpufreq.h"
+
+static short int test_config;
+
+static int pre_init_tests(void);
+static int post_init_tests(void);
+
+#else /* ! ARM_BL_CPUFREQ_DEFINE_TESTS */
+
+#ifdef CONFIG_ARM_BL_CPUFREQ_TEST
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) __module_pr_fmt("[test] ", fmt)
+
+#define SWITCH_DELAY 10
+#define SWITCH_TRANSITION_DELAY 200
+#define POST_INIT_TESTS_DELAY 100
+
+static DECLARE_WAIT_QUEUE_HEAD(test_wq);
+static int test_transition_count;
+unsigned int test_transition_freq;
+
+module_param(test_config, short, 1);
+MODULE_PARM_DESC(test_config, "Run tests before registering the cpufreq driver (0: no tests, 1: run tests, then register the driver (default))");
+
+static struct cpufreq_frequency_table const *get_other_entry(
+ struct cpufreq_frequency_table const *entry)
+{
+ if (entry_to_cluster(entry) == CLUSTER_BIG)
+ return find_entry_by_cluster(CLUSTER_LITTLE);
+ else
+ return find_entry_by_cluster(CLUSTER_BIG);
+}
+
+static int test_cpufreq_frequency_table(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ struct cpufreq_frequency_table const *entry;
+
+	/* Get the big and little cpufreq_frequency_table entries and check
+	 * that entry_to_freq() and entry_to_cluster() return the
+	 * corresponding frequency and cluster id.
+	 */
+ entry = find_entry_by_cluster(CLUSTER_BIG);
+
+ ++nTest;
+ if (entry_to_freq(entry) != FREQ_BIG) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/frequency_table/%d:entry_to_freq(big) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ ++nTest;
+ if (entry_to_cluster(entry) != CLUSTER_BIG) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/frequency_table/%d:entry_to_cluster(big) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ entry = find_entry_by_cluster(CLUSTER_LITTLE);
+
+ ++nTest;
+ if (entry_to_freq(entry) != FREQ_LITTLE) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/frequency_table/%d:entry_to_freq(little) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ ++nTest;
+ if (entry_to_cluster(entry) != CLUSTER_LITTLE) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/frequency_table/%d:entry_to_cluster(little) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ pr_info("name=pre-init/frequency_table run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_cluster_to_freq(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+
+	/* Check that cluster_to_freq() results are consistent, i.e.:
+ * - CLUSTER_BIG => FREQ_BIG
+ * - CLUSTER_LITTLE => FREQ_LITTLE
+ */
+ ++nTest;
+ if (cluster_to_freq(CLUSTER_BIG) != FREQ_BIG) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/cluster_to_freq/%d:cluster_to_freq(big) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ ++nTest;
+ if (cluster_to_freq(CLUSTER_LITTLE) != FREQ_LITTLE) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/cluster_to_freq/%d:cluster_to_freq(little) result=%s\n",
+ nTest, (testResult ? "PASS" : "FAIL"));
+
+ pr_info("name=pre-init/cluster_to_freq run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_get_current_cluster(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cluster, cpu;
+
+	/* Check that get_current_cluster() returns a consistent value, i.e.
+	 * CLUSTER_BIG or CLUSTER_LITTLE
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ cluster = get_current_cluster(cpu);
+ ++nTest;
+ if ((cluster != CLUSTER_BIG) && (cluster != CLUSTER_LITTLE)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=pre-init/get_current_cluster/%d:get_current_cluster(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+ }
+
+ pr_info("name=pre-init/get_current_cluster run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_bl_cpufreq_get(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cpu;
+ struct cpufreq_frequency_table const *other_entry = NULL;
+ struct cpufreq_frequency_table const *origin_entry = NULL;
+ struct cpufreq_policy *policy = NULL;
+
+ /*
+	 * Check the bl_cpufreq_get() return value: for every core it has to be
+	 * the frequency of origin_entry
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ policy = cpufreq_cpu_get(cpu);
+ origin_entry = find_entry_by_cluster(get_current_cluster(cpu));
+ other_entry = get_other_entry(origin_entry);
+
+ ++nTest;
+ if (bl_cpufreq_get(cpu) != entry_to_freq(origin_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/bl_cpufreq_get/%d:origin(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch to the "other" cluster, i.e. the cluster not in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(other_entry),
+ CPUFREQ_RELATION_H);
+
+ ++nTest;
+ if (bl_cpufreq_get(cpu) != entry_to_freq(other_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/bl_cpufreq_get/%d:other(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch back to the "origin" cluster, i.e. the cluster in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(origin_entry),
+ CPUFREQ_RELATION_H);
+ cpufreq_cpu_put(policy);
+ }
+
+ pr_info("name=post-init/bl_cpufreq_get run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_get_current_freq(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cpu;
+ struct cpufreq_frequency_table const *other_entry = NULL;
+ struct cpufreq_frequency_table const *origin_entry = NULL;
+ struct cpufreq_policy *policy = NULL;
+
+ /*
+	 * Check that get_current_freq() returns a consistent value, i.e.
+	 * FREQ_BIG while on the big cluster and FREQ_LITTLE on the little
+	 * cluster
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ policy = cpufreq_cpu_get(cpu);
+ origin_entry = find_entry_by_cluster(get_current_cluster(cpu));
+ other_entry = get_other_entry(origin_entry);
+
+ ++nTest;
+ if (get_current_freq(cpu) != entry_to_freq(origin_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/get_current_freq/%d:origin(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch to the "other" cluster, i.e. the cluster not in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(other_entry),
+ CPUFREQ_RELATION_H);
+
+ ++nTest;
+ if (get_current_freq(cpu) != entry_to_freq(other_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/get_current_freq/%d:other(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch back to the "origin" cluster, i.e. the cluster in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(origin_entry),
+ CPUFREQ_RELATION_H);
+ cpufreq_cpu_put(policy);
+ }
+
+ pr_info("name=post-init/get_current_freq run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_get_current_cached_cluster(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cpu, cluster;
+ struct cpufreq_frequency_table const *other_entry = NULL;
+ struct cpufreq_frequency_table const *origin_entry = NULL;
+ struct cpufreq_policy *policy = NULL;
+
+ /*
+	 * Check that get_current_cached_cluster() returns a consistent value,
+	 * i.e. CLUSTER_BIG while on the big cluster and CLUSTER_LITTLE on the
+	 * little cluster
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ policy = cpufreq_cpu_get(cpu);
+ origin_entry = find_entry_by_cluster(get_current_cluster(cpu));
+ other_entry = get_other_entry(origin_entry);
+
+ ++nTest;
+ cluster = get_current_cached_cluster(cpu);
+ if (cluster != entry_to_cluster(origin_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/get_current_cached_cluster/%d:origin(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch to the "other" cluster, i.e. the cluster not in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(other_entry),
+ CPUFREQ_RELATION_H);
+
+ ++nTest;
+ cluster = get_current_cached_cluster(cpu);
+ if (cluster != entry_to_cluster(other_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/get_current_cached_cluster/%d:other(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /*
+		 * Switch back to the "origin" cluster, i.e. the cluster in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(origin_entry),
+ CPUFREQ_RELATION_H);
+ cpufreq_cpu_put(policy);
+ }
+
+ pr_info("name=post-init/get_current_cached_cluster run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_cpufreq_driver_target(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cpu;
+ struct cpufreq_frequency_table const *other_entry = NULL;
+ struct cpufreq_frequency_table const *origin_entry = NULL;
+ struct cpufreq_policy *policy = NULL;
+
+ /*
+	 * Try to switch between clusters and check that the switch was
+	 * performed successfully
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ policy = cpufreq_cpu_get(cpu);
+ origin_entry = find_entry_by_cluster(get_current_cluster(cpu));
+ other_entry = get_other_entry(origin_entry);
+
+		/* Switch to the "other" cluster, i.e. the cluster not in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(other_entry),
+ CPUFREQ_RELATION_H);
+
+ /*
+ * Give the hardware some time to switch between clusters
+ */
+ mdelay(SWITCH_DELAY);
+
+ ++nTest;
+ if (get_current_cluster(cpu) != entry_to_cluster(other_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/cpufreq_driver_target/%d:other(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /* Switch again to "other" cluster
+ */
+ cpufreq_driver_target(policy, entry_to_freq(other_entry),
+ CPUFREQ_RELATION_H);
+ /*
+ * Give the hardware some time to switch between clusters
+ */
+ mdelay(SWITCH_DELAY);
+
+ ++nTest;
+ if (get_current_cluster(cpu) != entry_to_cluster(other_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/cpufreq_driver_target/%d:otherAgain(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+		/* Switch back to the "origin" cluster, i.e. the cluster in use
+		 * at module loading time
+ */
+ cpufreq_driver_target(policy, entry_to_freq(origin_entry),
+ CPUFREQ_RELATION_H);
+ /*
+ * Give the hardware some time to switch between clusters
+ */
+ mdelay(SWITCH_DELAY);
+
+ ++nTest;
+		if (get_current_cluster(cpu) != entry_to_cluster(origin_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/cpufreq_driver_target/%d:origin(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ /* Switch again to "origin" cluster
+ */
+ cpufreq_driver_target(policy, entry_to_freq(origin_entry),
+ CPUFREQ_RELATION_H);
+ /*
+ * Give the hardware some time to switch between clusters
+ */
+ mdelay(SWITCH_DELAY);
+
+ ++nTest;
+		if (get_current_cluster(cpu) != entry_to_cluster(origin_entry)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/cpufreq_driver_target/%d:originAgain(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ cpufreq_cpu_put(policy);
+ }
+
+ pr_info("name=post-init/cpufreq_driver_target run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+/* Check that the new frequency is the expected one, increment the transition
+ * count and wake up the test function. cpufreq notifies each transition twice
+ * (CPUFREQ_PRECHANGE and CPUFREQ_POSTCHANGE), which is why the tests below
+ * wait for a transition count of 2.
+ */
+static int test_arm_bl_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (freq->new != test_transition_freq)
+ test_transition_freq = -1;
+
+ ++test_transition_count;
+
+ wake_up(&test_wq);
+
+ return 0;
+}
+static struct notifier_block test_arm_bl_cpufreq_notifier_block = {
+ .notifier_call = test_arm_bl_cpufreq_notifier
+};
+
+static int test_transitions(void)
+{
+ int nTest = 0, failCount = 0, testResult = 0;
+ unsigned int cpu, origin_freq, other_freq;
+ struct cpufreq_frequency_table const *other_entry = NULL;
+ struct cpufreq_frequency_table const *origin_entry = NULL;
+ struct cpufreq_policy *policy = NULL;
+
+ /*
+	 * Register test_arm_bl_cpufreq_notifier_block as a transition notifier:
+	 * it will be called on each cluster change and will increment
+	 * test_transition_count
+ */
+ cpufreq_register_notifier(&test_arm_bl_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ /*
+	 * Switch between clusters and check that notifications are received
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ policy = cpufreq_cpu_get(cpu);
+ origin_entry = find_entry_by_cluster(get_current_cluster(cpu));
+ other_entry = get_other_entry(origin_entry);
+ origin_freq = entry_to_freq(origin_entry);
+ other_freq = entry_to_freq(other_entry);
+
+		/* Switch to the "other" cluster and check the notification
+ */
+ ++nTest;
+ test_transition_count = 0;
+ test_transition_freq = other_freq;
+ cpufreq_driver_target(policy, other_freq, CPUFREQ_RELATION_H);
+ wait_event_timeout(test_wq, (test_transition_count == 2),
+ msecs_to_jiffies(SWITCH_TRANSITION_DELAY));
+
+ if ((test_transition_count != 2)
+ || (test_transition_freq != other_freq)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/transitions/%d:other(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+		/* Switch back to the "origin" cluster and check the notification
+ */
+ ++nTest;
+ test_transition_count = 0;
+ test_transition_freq = origin_freq;
+ cpufreq_driver_target(policy, origin_freq, CPUFREQ_RELATION_H);
+ wait_event_timeout(test_wq, (test_transition_count == 2),
+ msecs_to_jiffies(SWITCH_TRANSITION_DELAY));
+
+ if ((test_transition_count != 2)
+ || (test_transition_freq != origin_freq)) {
+ testResult = 0;
+ ++failCount;
+ } else
+ testResult = 1;
+ pr_info("name=post-init/transitions/%d:origin(%u) result=%s\n",
+ nTest, cpu, (testResult ? "PASS" : "FAIL"));
+
+ cpufreq_cpu_put(policy);
+ }
+
+ cpufreq_unregister_notifier(&test_arm_bl_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ pr_info("name=post-init/transitions run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int pre_init_tests(void)
+{
+ int nTest = 0, failCount = 0;
+
+	pr_info("Begin pre-init tests\n");
+
+ ++nTest;
+ if (test_cpufreq_frequency_table() < 0)
+ ++failCount;
+
+ ++nTest;
+ if (test_cluster_to_freq() < 0)
+ ++failCount;
+
+ ++nTest;
+ if (test_get_current_cluster() < 0)
+ ++failCount;
+
+ pr_info("name=pre-init run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+static int post_init_tests(void)
+{
+ /*
+ * Run all post-init tests
+ *
+	 * We wait POST_INIT_TESTS_DELAY ms between tests to be sure the system
+	 * is in a stable state before running a new test.
+ */
+ int nTest = 0, failCount = 0;
+
+ mdelay(POST_INIT_TESTS_DELAY);
+ ++nTest;
+ if (test_cpufreq_driver_target() < 0)
+ ++failCount;
+
+ mdelay(POST_INIT_TESTS_DELAY);
+ ++nTest;
+ if (test_transitions() < 0)
+ ++failCount;
+
+ mdelay(POST_INIT_TESTS_DELAY);
+ ++nTest;
+ if (test_get_current_freq() < 0)
+ ++failCount;
+
+ mdelay(POST_INIT_TESTS_DELAY);
+ ++nTest;
+ if (test_bl_cpufreq_get() < 0)
+ ++failCount;
+
+ mdelay(POST_INIT_TESTS_DELAY);
+ ++nTest;
+ if (test_get_current_cached_cluster() < 0)
+ ++failCount;
+
+ pr_info("name=post-init run=%d result=%s pass=%d fail=%d\n",
+ nTest, (failCount == 0 ? "PASS" : "FAIL"),
+ (nTest - failCount), failCount);
+ if (failCount != 0)
+ return -1;
+
+ return 0;
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) __module_pr_fmt("", fmt)
+#else /* ! CONFIG_ARM_BL_CPUFREQ_TEST */
+
+static int pre_init_tests(void) { return 0; }
+static int post_init_tests(void) { return 0; }
+
+#endif /* CONFIG_ARM_BL_CPUFREQ_TEST */
+#endif /* ARM_BL_CPUFREQ_DEFINE_TESTS */
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 00000000000..7858957b9e6
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,593 @@
+/*
+ * ARM big.LITTLE Platforms CPUFreq support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2012 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/bL_switcher.h>
+#include <asm/topology.h>
+#include "arm_big_little.h"
+
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
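The two macros above implement the switcher's virtual frequency space: A7 frequencies are shown to cpufreq at half their physical rate (the rough assumption being that an A15 does about twice the work per cycle of an A7) and doubled back before being handed to the clock framework. A small worked example, with invented kHz values:

```c
/* Worked example -- all values in kHz and chosen purely for illustration */
static void bl_virt_freq_example(void)	/* hypothetical helper */
{
	unsigned int a7_hw = 1000000;	/* physical A7 clock: 1 GHz */
	unsigned int virt, hw, big;

	virt = VIRT_FREQ(A7_CLUSTER, a7_hw);	/* 500000: what cpufreq sees */
	hw = ACTUAL_FREQ(A7_CLUSTER, virt);	/* 1000000: back to a clk rate */
	big = VIRT_FREQ(A15_CLUSTER, 1200000);	/* 1200000: A15 is unscaled */

	pr_debug("virt=%u hw=%u big=%u\n", virt, hw, big);
}
```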
+
+static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static int freq_table_cnt[MAX_CLUSTERS];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+
+static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+/*
+ * Functions to get the current status.
+ *
+ * Beware that the cluster for another CPU may change unexpectedly.
+ */
+static int cpu_to_cluster(int cpu)
+{
+	return is_bL_switching_enabled() ? MAX_CLUSTERS :
+ topology_physical_package_id(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
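With the switcher enabled, several logical CPUs can map onto one physical cluster, so the cluster clock has to satisfy the fastest outstanding request. A comment-style example with invented values:

```c
/*
 * Invented example (virtual kHz), switcher enabled, CPU0 and CPU1 both
 * currently resident on the A7 cluster:
 *
 *   per_cpu(cpu_last_req_freq, 0) = 250000
 *   per_cpu(cpu_last_req_freq, 1) = 500000
 *
 *   find_cluster_maxfreq(A7_CLUSTER) -> 500000
 *   ACTUAL_FREQ(A7_CLUSTER, 500000)  -> 1000000 kHz handed to clk_set_rate()
 */
```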
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A15 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ if (is_bL_switching_enabled()) {
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+			if (ret)
+				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+					__func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
+
+/* Validate policy frequency range */
+static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ /* This call takes care of it all using freq_table */
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ u32 cpu = policy->cpu, freq_tab_idx, cur_cluster, new_cluster,
+ actual_cluster;
+ int ret = 0;
+
+ /* ASSUMPTION: The cpu can't be hotplugged in this function */
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs.cpu = cpu;
+ freqs.old = bL_cpufreq_get_rate(cpu);
+
+ /* Determine valid target frequency using freq_table */
+ cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &freq_tab_idx);
+ freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
+ __func__, cpu, cur_cluster, freqs.old, target_freq,
+ freqs.new);
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs.new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs.new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs.new);
+ if (ret)
+ return ret;
+
+ policy->cur = freqs.new;
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
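The cluster-selection logic in bL_cpufreq_set_target() is easiest to see with concrete numbers. A sketch under assumed thresholds (clk_big_min = 600000 and clk_little_max = 500000 virtual kHz; both depend on the platform tables, so these figures are illustrative only):

```c
/* Illustrative restatement of the switch decision above -- hypothetical */
static u32 pick_cluster(u32 cur, unsigned int new_freq)
{
	if (cur == A15_CLUSTER && new_freq < clk_big_min)
		return A7_CLUSTER;	/* e.g. 550000 < 600000 */
	if (cur == A7_CLUSTER && new_freq > clk_little_max)
		return A15_CLUSTER;	/* e.g. 550000 > 500000 */
	return cur;			/* otherwise stay put */
}
```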
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t min_freq = ~0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency < min_freq)
+ min_freq = table[i].frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t max_freq = 0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency > max_freq)
+ max_freq = table[i].frequency;
+ return max_freq;
+}
+
+/* translate the integer array into cpufreq_frequency_table entries */
+struct cpufreq_frequency_table *
+arm_bL_copy_table_from_array(unsigned int *table, int count)
+{
+ int i;
+
+ struct cpufreq_frequency_table *freq_table;
+
+ pr_debug("%s: table: %p, count: %d\n", __func__, table, count);
+
+ freq_table = kmalloc(sizeof(*freq_table) * (count + 1), GFP_KERNEL);
+ if (!freq_table)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ pr_debug("%s: index: %d, freq: %d\n", __func__, i, table[i]);
+ freq_table[i].index = i;
+ freq_table[i].frequency = table[i]; /* in kHZ */
+ }
+
+ freq_table[i].index = count;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ return freq_table;
+}
+EXPORT_SYMBOL_GPL(arm_bL_copy_table_from_array);
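A hypothetical caller of arm_bL_copy_table_from_array(), to show the ownership rules (the frequencies are invented; the returned copy belongs to the caller until freed):

```c
static unsigned int my_freqs[] = { 350000, 700000, 1000000 };	/* kHz, invented */

static int my_freq_table_example(void)	/* hypothetical */
{
	struct cpufreq_frequency_table *tbl;
	int i;

	tbl = arm_bL_copy_table_from_array(my_freqs, ARRAY_SIZE(my_freqs));
	if (!tbl)
		return -ENOMEM;

	for (i = 0; tbl[i].frequency != CPUFREQ_TABLE_END; i++)
		pr_debug("entry %u: %u kHz\n", tbl[i].index, tbl[i].frequency);

	kfree(tbl);	/* the copy is the caller's to free */
	return 0;
}
```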
+
+void arm_bL_free_freq_table(u32 cluster)
+{
+ pr_debug("%s: free freq table\n", __func__);
+
+ kfree(freq_table[cluster]);
+}
+EXPORT_SYMBOL_GPL(arm_bL_free_freq_table);
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += freq_table_cnt[i];
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; j < freq_table_cnt[i]; j++) {
+ table[k].index = k;
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
+ }
+
+ table[k].index = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
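A worked merge, assuming two small input tables (values invented); note that the little cluster is added first and its frequencies are halved by VIRT_FREQ(), which is what keeps the merged table in increasing order:

```c
/*
 * Hypothetical input tables, physical kHz:
 *   A7  (cluster 1): 350000, 500000, 1000000 -> virtual 175000, 250000, 500000
 *   A15 (cluster 0): 600000, 1200000         -> virtual 600000, 1200000
 *
 * Merged virtual table (little entries first, so frequencies increase):
 *   index: 0       1       2       3       4        5
 *   freq : 175000  250000  500000  600000  1200000  CPUFREQ_TABLE_END
 */
```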
+
+static void _put_cluster_clk_and_freq_table(u32 cluster)
+{
+ if (!atomic_dec_return(&cluster_usage[cluster])) {
+ clk_put(clk[cluster]);
+ clk[cluster] = NULL;
+ arm_bL_ops->put_freq_tbl(cluster);
+ freq_table[cluster] = NULL;
+ pr_debug("%s: cluster: %d\n", __func__, cluster);
+ }
+}
+
+static void put_cluster_clk_and_freq_table(u32 cluster)
+{
+ int i;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cluster);
+
+ if (atomic_dec_return(&cluster_usage[MAX_CLUSTERS]))
+ return;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ _put_cluster_clk_and_freq_table(i);
+
+ /* free virtual table */
+ arm_bL_free_freq_table(MAX_CLUSTERS);
+}
+
+static int _get_cluster_clk_and_freq_table(u32 cluster)
+{
+ char name[9] = "clusterX";
+ int count;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ freq_table[cluster] = arm_bL_ops->get_freq_tbl(cluster, &count);
+ if (!freq_table[cluster])
+ goto atomic_dec;
+
+ freq_table_cnt[cluster] = count;
+
+ name[7] = cluster + '0';
+ clk[cluster] = clk_get(NULL, name);
+ if (!IS_ERR_OR_NULL(clk[cluster])) {
+ pr_debug("%s: clk: %p & freq table: %p, cluster: %d\n",
+ __func__, clk[cluster], freq_table[cluster],
+ cluster);
+ return 0;
+ }
+
+ arm_bL_ops->put_freq_tbl(cluster);
+
+atomic_dec:
+ atomic_dec(&cluster_usage[cluster]);
+ pr_err("%s: Failed to get data for cluster: %d\n", __func__, cluster);
+ return -ENODATA;
+}
+
+static int get_cluster_clk_and_freq_table(u32 cluster)
+{
+ int i, ret;
+
+ if (cluster < MAX_CLUSTERS)
+ return _get_cluster_clk_and_freq_table(cluster);
+
+ if (atomic_inc_return(&cluster_usage[MAX_CLUSTERS]) != 1)
+ return 0;
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ ret = _get_cluster_clk_and_freq_table(i);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ while (i)
+ _put_cluster_clk_and_freq_table(--i);
+
+ atomic_dec(&cluster_usage[MAX_CLUSTERS]);
+
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int bL_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ int result;
+
+ result = get_cluster_clk_and_freq_table(cur_cluster);
+ if (result)
+ return result;
+
+ result = cpufreq_frequency_table_cpuinfo(policy,
+ freq_table[cur_cluster]);
+ if (result) {
+ pr_err("CPU %d, cluster: %d invalid freq table\n", policy->cpu,
+ cur_cluster);
+ put_cluster_clk_and_freq_table(cur_cluster);
+ return result;
+ }
+
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms assumed */
+ policy->cur = clk_get_cpu_rate(policy->cpu);
+ per_cpu(cpu_last_req_freq, policy->cpu) = policy->cur;
+
+ pr_info("%s: Initialized, cpu: %d, cluster %d\n", __func__,
+ policy->cpu, cur_cluster);
+
+ return 0;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *bL_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver bL_cpufreq_driver = {
+ .name = "arm-big-little",
+ .flags = CPUFREQ_STICKY,
+ .verify = bL_cpufreq_verify_policy,
+ .target = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get_rate,
+ .init = bL_cpufreq_init,
+ .attr = bL_cpufreq_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+{
+ int ret, i;
+
+ if (arm_bL_ops) {
+ pr_debug("%s: Already registered: %s, exiting\n", __func__,
+ arm_bL_ops->name);
+ return -EBUSY;
+ }
+
+ if (!ops || !strlen(ops->name) || !ops->get_freq_tbl) {
+ pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
+ return -ENODEV;
+ }
+
+ arm_bL_ops = ops;
+
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ ret = cpufreq_register_driver(&bL_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ops->name, ret);
+ arm_bL_ops = NULL;
+ } else {
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_register);
+
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+{
+	if (arm_bL_ops != ops) {
+		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
+				__func__, arm_bL_ops->name);
+		return;
+	}
+
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ arm_bL_ops->name);
+
+	/* Drop the freq tables; with the switcher only the merged virtual
+	 * table was in use, saving a table get/put on every cpu in/out */
+ if (is_bL_switching_enabled()) {
+ put_cluster_clk_and_freq_table(MAX_CLUSTERS);
+ } else {
+ int i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ put_cluster_clk_and_freq_table(i);
+ }
+
+ arm_bL_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 00000000000..6712a501198
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,38 @@
+/*
+ * ARM big.LITTLE platform's CPUFreq header file
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef CPUFREQ_ARM_BIG_LITTLE_H
+#define CPUFREQ_ARM_BIG_LITTLE_H
+
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+
+struct cpufreq_arm_bL_ops {
+ char name[CPUFREQ_NAME_LEN];
+ struct cpufreq_frequency_table *(*get_freq_tbl)(u32 cluster, int *count);
+ void (*put_freq_tbl)(u32 cluster);
+};
+
+struct cpufreq_frequency_table *
+arm_bL_copy_table_from_array(unsigned int *table, int count);
+void arm_bL_free_freq_table(u32 cluster);
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+
+#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
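A platform only has to supply this small ops structure; the two glue drivers added below (DT-generic and vexpress) follow exactly this pattern. A minimal hypothetical example, with invented names and frequencies:

```c
/* Hypothetical glue driver -- names and values are invented */
static unsigned int foo_freqs[] = { 400000, 800000 };	/* kHz */

static struct cpufreq_frequency_table *foo_get_freq_tbl(u32 cluster, int *count)
{
	*count = ARRAY_SIZE(foo_freqs);
	return arm_bL_copy_table_from_array(foo_freqs, *count);
}

static void foo_put_freq_tbl(u32 cluster)
{
	arm_bL_free_freq_table(cluster);
}

static struct cpufreq_arm_bL_ops foo_bL_ops = {
	.name		= "foo-bl",
	.get_freq_tbl	= foo_get_freq_tbl,
	.put_freq_tbl	= foo_put_freq_tbl,
};

/* module init/exit would then call bL_cpufreq_register(&foo_bL_ops)
 * and bL_cpufreq_unregister(&foo_bL_ops) respectively */
```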
diff --git a/drivers/cpufreq/arm_dt_big_little.c b/drivers/cpufreq/arm_dt_big_little.c
new file mode 100644
index 00000000000..fabfb9c5c37
--- /dev/null
+++ b/drivers/cpufreq/arm_dt_big_little.c
@@ -0,0 +1,101 @@
+/*
+ * Generic big.LITTLE CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver and gets
+ * frequency information from the Device Tree. The freq table in DT must be in kHz.
+ *
+ * Copyright (C) 2012 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "arm_big_little.h"
+
+static struct cpufreq_frequency_table *generic_get_freq_tbl(u32 cluster,
+ int *count)
+{
+ struct device_node *np = NULL;
+ const struct property *pp;
+ unsigned int *table = NULL;
+ int cluster_id;
+ struct cpufreq_frequency_table *cpufreq_table;
+
+ while ((np = of_find_node_by_name(np, "cluster"))) {
+ if (of_property_read_u32(np, "reg", &cluster_id))
+ continue;
+
+ if (cluster_id != cluster)
+ continue;
+
+ pp = of_find_property(np, "freqs", NULL);
+ if (!pp)
+ continue;
+
+ *count = pp->length / sizeof(u32);
+ if (!*count)
+ continue;
+
+ table = kmalloc(sizeof(*table) * (*count), GFP_KERNEL);
+ if (!table) {
+ pr_err("%s: Failed to allocate memory for table\n",
+ __func__);
+ return NULL;
+ }
+
+ of_property_read_u32_array(np, "freqs", table, *count);
+ break;
+ }
+
+ if (!table) {
+		pr_err("%s: Unable to retrieve freq table from Device Tree\n",
+ __func__);
+ return NULL;
+ }
+
+ cpufreq_table = arm_bL_copy_table_from_array(table, *count);
+ kfree(table);
+
+ return cpufreq_table;
+}
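The loop above walks every node named "cluster" and expects a "reg" cell holding the cluster id plus a "freqs" array in kHz. A device-tree fragment of the assumed shape (all values invented; the exact binding is whatever the platform's dts provides):

```dts
cpus {
	cluster@0 {
		reg = <0>;	/* big cluster */
		freqs = <500000 600000 700000 800000 900000 1000000>;	/* kHz */
	};
	cluster@1 {
		reg = <1>;	/* little cluster */
		freqs = <350000 400000 500000 600000 700000 1000000>;	/* kHz */
	};
};
```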
+
+static void generic_put_freq_tbl(u32 cluster)
+{
+ arm_bL_free_freq_table(cluster);
+}
+
+static struct cpufreq_arm_bL_ops generic_bL_ops = {
+ .name = "generic-bl",
+ .get_freq_tbl = generic_get_freq_tbl,
+ .put_freq_tbl = generic_put_freq_tbl,
+};
+
+static int generic_bL_init(void)
+{
+ return bL_cpufreq_register(&generic_bL_ops);
+}
+module_init(generic_bL_init);
+
+static void generic_bL_exit(void)
+{
+ return bL_cpufreq_unregister(&generic_bL_ops);
+}
+module_exit(generic_bL_exit);
+
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 6c287ae7bbb..7dc9c4efbcf 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -21,6 +21,7 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h>
+#include <asm/bL_switcher.h>
static spinlock_t cpufreq_stats_lock;
@@ -403,7 +404,7 @@ static struct notifier_block notifier_trans_block = {
.notifier_call = cpufreq_stat_notifier_trans
};
-static int __init cpufreq_stats_init(void)
+static int cpufreq_stats_setup(void)
{
int ret;
unsigned int cpu;
@@ -431,7 +432,8 @@ static int __init cpufreq_stats_init(void)
return 0;
}
-static void __exit cpufreq_stats_exit(void)
+
+static void cpufreq_stats_cleanup(void)
{
unsigned int cpu;
@@ -446,6 +448,49 @@ static void __exit cpufreq_stats_exit(void)
}
}
+static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_stats_cleanup();
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ case BL_NOTIFY_POST_DISABLE:
+ cpufreq_stats_setup();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
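The teardown/re-setup around switcher transitions exists because enabling or disabling the switcher changes the frequency table the stats are keyed on (per-cluster physical tables versus the merged virtual table), so the per-CPU stats tables must be rebuilt. A sketch of the assumed sequence on enable:

```c
/*
 * Assumed flow when the switcher is being enabled:
 *   BL_NOTIFY_PRE_ENABLE  -> cpufreq_stats_cleanup()  (drop tables built on
 *                                                      the physical freqs)
 *   ...bL switcher reconfigures cpufreq...
 *   BL_NOTIFY_POST_ENABLE -> cpufreq_stats_setup()    (rebuild on the merged
 *                                                      virtual table)
 */
```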
+
+static struct notifier_block switcher_notifier = {
+ .notifier_call = cpufreq_stats_switcher_notifier,
+};
+
+static int __init cpufreq_stats_init(void)
+{
+ int ret;
+ spin_lock_init(&cpufreq_stats_lock);
+
+ ret = cpufreq_stats_setup();
+ if (!ret)
+ bL_switcher_register_notifier(&switcher_notifier);
+
+ return ret;
+}
+
+static void __exit cpufreq_stats_exit(void)
+{
+ bL_switcher_unregister_notifier(&switcher_notifier);
+ cpufreq_stats_cleanup();
+}
+
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
"through sysfs filesystem");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6133ef5cf67..d8a8c9bfd4f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
}
struct sample {
- ktime_t start_time;
- ktime_t end_time;
int core_pct_busy;
- int pstate_pct_busy;
- u64 duration_us;
- u64 idletime_us;
u64 aperf;
u64 mperf;
int freq;
@@ -91,8 +86,6 @@ struct cpudata {
int min_pstate_count;
int idle_mode;
- ktime_t prev_sample;
- u64 prev_idle_time_us;
u64 prev_aperf;
u64 prev_mperf;
int sample_ptr;
@@ -124,6 +117,8 @@ struct perf_limits {
int min_perf_pct;
int32_t max_perf;
int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
};
static struct perf_limits limits = {
@@ -132,6 +127,8 @@ static struct perf_limits limits = {
.max_perf = int_tofp(1),
.min_perf_pct = 0,
.min_perf = 0,
+ .max_policy_pct = 100,
+ .max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -302,7 +299,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return count;
}
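With max_policy_pct and max_sysfs_pct tracked separately, the effective cap is always the tighter of the two, so a sysfs write can no longer lift a policy-imposed limit. A quick illustration with assumed numbers:

```c
/*
 * Assumed example: the policy caps performance at 80 %, then userspace
 * writes 90 to max_perf_pct in sysfs:
 *
 *   limits.max_policy_pct = 80;                  (intel_pstate_set_policy)
 *   limits.max_sysfs_pct  = 90;                  (store_max_perf_pct)
 *   limits.max_perf_pct   = min(80, 90) = 80;    (both paths)
 */
```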
@@ -450,48 +448,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- sample->pstate_pct_busy = 100 - div64_u64(
- sample->idletime_us * 100,
- sample->duration_us);
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
- sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
- 100);
+ sample->core_pct_busy = core_pct;
}
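After this change the busy figure comes purely from the APERF/MPERF deltas, with no idle-time bookkeeping. A worked example with invented counter deltas:

```c
/*
 * Invented sample: aperf delta = 48000, mperf delta = 64000,
 * max_pstate = 32 (a 3.2 GHz part, pstate units of 100 MHz):
 *
 *   core_pct      = 48000 * 100 / 64000 = 75
 *   sample->freq  = 32 * 75 * 1000      = 2400000 kHz (2.4 GHz)
 *   core_pct_busy = 75
 */
```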
static inline void intel_pstate_sample(struct cpudata *cpu)
{
- ktime_t now;
- u64 idle_time_us;
u64 aperf, mperf;
- now = ktime_get();
- idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- /* for the first sample, don't actually record a sample, just
- * set the baseline */
- if (cpu->prev_idle_time_us > 0) {
- cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
- cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
- cpu->samples[cpu->sample_ptr].end_time = now;
- cpu->samples[cpu->sample_ptr].duration_us =
- ktime_us_delta(now, cpu->prev_sample);
- cpu->samples[cpu->sample_ptr].idletime_us =
- idle_time_us - cpu->prev_idle_time_us;
-
- cpu->samples[cpu->sample_ptr].aperf = aperf;
- cpu->samples[cpu->sample_ptr].mperf = mperf;
- cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
- cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
- intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- }
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- cpu->prev_sample = now;
- cpu->prev_idle_time_us = idle_time_us;
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
}
@@ -575,22 +551,16 @@ static void intel_pstate_timer_func(unsigned long __data)
struct cpudata *cpu = (struct cpudata *) __data;
intel_pstate_sample(cpu);
+ intel_pstate_adjust_busy_pstate(cpu);
- if (!cpu->idle_mode)
- intel_pstate_adjust_busy_pstate(cpu);
- else
- intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
cpu->min_pstate_count++;
if (!(cpu->min_pstate_count % 5)) {
intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
- intel_pstate_idle_mode(cpu);
}
} else
cpu->min_pstate_count = 0;
-#endif
+
intel_pstate_set_sample_time(cpu);
}
@@ -670,8 +640,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
- limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
- limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
diff --git a/drivers/cpufreq/vexpress_big_little.c b/drivers/cpufreq/vexpress_big_little.c
new file mode 100644
index 00000000000..66648c3fc94
--- /dev/null
+++ b/drivers/cpufreq/vexpress_big_little.c
@@ -0,0 +1,74 @@
+/*
+ * Vexpress big.LITTLE CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver and gets
+ * frequency information from the SPC controller.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2012 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/vexpress.h>
+#include "arm_big_little.h"
+
+static struct cpufreq_frequency_table *vexpress_get_freq_tbl(u32 cluster,
+ int *count)
+{
+ unsigned int *table = vexpress_spc_get_freq_table(cluster, count);
+
+ if (!table || !*count) {
+		pr_err("SPC controller returned invalid freq table\n");
+ return NULL;
+ }
+
+ return arm_bL_copy_table_from_array(table, *count);
+}
+
+static void vexpress_put_freq_tbl(u32 cluster)
+{
+ arm_bL_free_freq_table(cluster);
+}
+
+static struct cpufreq_arm_bL_ops vexpress_bL_ops = {
+ .name = "vexpress-bL",
+ .get_freq_tbl = vexpress_get_freq_tbl,
+ .put_freq_tbl = vexpress_put_freq_tbl,
+};
+
+static int vexpress_bL_init(void)
+{
+ if (!vexpress_spc_check_loaded()) {
+ pr_info("%s: No SPC found\n", __func__);
+ return -ENOENT;
+ }
+
+ return bL_cpufreq_register(&vexpress_bL_ops);
+}
+module_init(vexpress_bL_init);
+
+static void vexpress_bL_exit(void)
+{
+ return bL_cpufreq_unregister(&vexpress_bL_ops);
+}
+module_exit(vexpress_bL_exit);
+
+MODULE_DESCRIPTION("ARM Vexpress big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 24c6e7d945e..6a7e6a9beff 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,6 +4,6 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
-
+obj-$(CONFIG_BIG_LITTLE) += arm_big_little.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/arm_big_little.c b/drivers/cpuidle/arm_big_little.c
new file mode 100644
index 00000000000..e5378896a8c
--- /dev/null
+++ b/drivers/cpuidle/arm_big_little.c
@@ -0,0 +1,183 @@
+/*
+ * big.LITTLE CPU idle driver.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/bitmap.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/clockchips.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tick.h>
+#include <linux/vexpress.h>
+#include <asm/mcpm.h>
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/idmap.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <linux/of.h>
+
+static int bl_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t time_start, time_end;
+ s64 diff;
+
+ time_start = ktime_get();
+
+ cpu_do_idle();
+
+ time_end = ktime_get();
+
+ local_irq_enable();
+
+ diff = ktime_to_us(ktime_sub(time_end, time_start));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
+}
+
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+static struct cpuidle_state bl_cpuidle_set[] __initdata = {
+ [0] = {
+ .enter = bl_cpuidle_simple_enter,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ },
+ [1] = {
+ .enter = bl_enter_powerdown,
+ .exit_latency = 300,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+};
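For reference, how a cpuidle governor would treat these two entries (the 300 us / 1000 us figures are placeholders pending calibration on real hardware, much like the cpufreq latency above):

```c
/*
 * Governor view of the table (menu governor, assumed behaviour):
 *   predicted idle  800 us -> only state 0 (WFI) qualifies, since
 *                             800 < C1's target_residency of 1000 us
 *   predicted idle 5000 us -> state 1 (C1) qualifies, provided the PM QoS
 *                             latency constraint tolerates 300 us exit latency
 */
```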
+
+struct cpuidle_driver bl_idle_driver = {
+ .name = "bl_idle",
+ .owner = THIS_MODULE,
+ .safe_state_index = 0
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, bl_idle_dev);
+
+static int notrace bl_powerdown_finisher(unsigned long arg)
+{
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cluster = (mpidr >> 8) & 0xf;
+ unsigned int cpu = mpidr & 0xf;
+
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+ mcpm_cpu_suspend(0); /* 0 should be replaced with better value here */
+ return 1;
+}
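The affinity fields come straight out of MPIDR. A quick decode with an example value:

```c
/*
 * Example: read_cpuid_mpidr() returns 0x102
 *   cluster = (0x102 >> 8) & 0xf = 1   (Aff1)
 *   cpu     =  0x102       & 0xf = 2   (Aff0)
 * so CPU 2 in cluster 1 registers cpu_resume as its MCPM entry vector
 * before suspending.
 */
```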
+
+/*
+ * bl_enter_powerdown - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: The target state to be programmed
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct timespec ts_preidle, ts_postidle, ts_idle;
+ int ret;
+
+ /* Used to keep track of the total time in idle */
+ getnstimeofday(&ts_preidle);
+
+ BUG_ON(!irqs_disabled());
+
+ cpu_pm_enter();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ ret = cpu_suspend((unsigned long) dev, bl_powerdown_finisher);
+ if (ret)
+ BUG();
+
+ mcpm_cpu_powered_up();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+ cpu_pm_exit();
+
+ getnstimeofday(&ts_postidle);
+ local_irq_enable();
+ ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+ dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC +
+ ts_idle.tv_sec * USEC_PER_SEC;
+ return idx;
+}
+
+/*
+ * bl_idle_init
+ *
+ * Registers the bl specific cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int __init bl_idle_init(void)
+{
+ struct cpuidle_device *dev;
+ int i, cpu_id;
+ struct cpuidle_driver *drv = &bl_idle_driver;
+
+ if (!of_find_compatible_node(NULL, NULL, "arm,generic")) {
+ pr_info("%s: No compatible node found\n", __func__);
+ return -ENODEV;
+ }
+
+	drv->state_count = ARRAY_SIZE(bl_cpuidle_set);
+
+ for (i = 0; i < drv->state_count; i++) {
+ memcpy(&drv->states[i], &bl_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ cpuidle_register_driver(drv);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+		pr_info("CPUidle for CPU%d registered\n", cpu_id);
+ dev = &per_cpu(bl_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+
+ dev->state_count = drv->state_count;
+
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_ERR "%s: Cpuidle register device failed\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+device_initcall(bl_idle_init);
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index e1aab38c5a8..ece83d6e049 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -37,20 +37,6 @@ extern void *scu_base_addr;
static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
-static inline unsigned int get_auxcr(void)
-{
- unsigned int val;
- asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val) : : "cc");
- return val;
-}
-
-static inline void set_auxcr(unsigned int val)
-{
- asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
- : : "r" (val) : "cc");
- isb();
-}
-
static noinline void calxeda_idle_restore(void)
{
set_cr(get_cr() | CR_C);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index cf268b14ae9..d482b12f5c8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 69d04d28b1e..09c7ad13031 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -93,6 +93,7 @@ int of_dma_controller_register(struct device_node *np,
{
struct of_dma *ofdma;
int nbcells;
+ const __be32 *prop;
if (!np || !of_dma_xlate) {
pr_err("%s: not enough information provided\n", __func__);
@@ -103,8 +104,11 @@ int of_dma_controller_register(struct device_node *np,
if (!ofdma)
return -ENOMEM;
- nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
- if (!nbcells) {
+ prop = of_get_property(np, "#dma-cells", NULL);
+ if (prop)
+ nbcells = be32_to_cpup(prop);
+
+ if (!prop || !nbcells) {
pr_err("%s: #dma-cells property is missing or invalid\n",
__func__);
kfree(ofdma);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index d01faeb0f27..ce3dc3e9688 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
+ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
if (ret) {
spin_lock(&pd_chan->lock);
pd_chan->descs_allocated++;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b..71bf4ec300e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
return;
}
- if (d40_queue_start(d40c) == NULL)
+ if (d40_queue_start(d40c) == NULL) {
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5899a76eec3..769d92ec573 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -327,17 +327,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = {
};
/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
/* Total possible dynamic ce_count attribute file table */
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 55a7e7769af..dd2eddeb1e0 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -23,13 +23,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <asm/mach/irq.h>
-
#include <mach/msm_gpiomux.h>
#include <mach/msm_iomap.h>
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7877335c4cc..7176743915d 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -32,7 +33,6 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <asm-generic/bug.h>
-#include <asm/mach/irq.h>
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 159f5c57eb4..a612ea1c53c 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -25,11 +25,10 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
-#include <asm/mach/irq.h>
-
#define OFF_MODE 1
static LIST_HEAD(omap_gpio_list);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index b820869ca93..29763361d13 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
@@ -23,7 +24,6 @@
#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pm.h>
-#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 8325f580c0f..2d3af981641 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -26,8 +27,6 @@
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <asm/mach/irq.h>
-
#include <mach/irqs.h>
/*
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 414ad912232..8e215554888 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -27,11 +27,10 @@
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
-#include <asm/mach/irq.h>
-
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
#define GPIO_BIT(x) ((x) & 0x7)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 528429252f0..02e52d543e4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe7d2c..fbc0823cfa1 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = afbdev->afb.obj;
bo = gem_to_ast_bo(obj);
+ /*
+ * Try to reserve the BO; if that fails with -EBUSY,
+ * the BO is being moved and we should store up
+ * the damage until later.
+ */
ret = ast_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
}
if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
ast->fbdev = afbdev;
afbdev->helper.funcs = &ast_fb_helper_funcs;
+ spin_lock_init(&afbdev->dirty_lock);
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
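[The pattern added above (and repeated for cirrus and mgag200 below) is a dirty-rectangle accumulator: new damage is unioned with any stored damage, stashed if the BO is busy, and otherwise flushed while the stored rectangle is reset to "empty" (x1/y1 = INT_MAX, x2/y2 = 0, so any real rectangle replaces it). A self-contained sketch of the union and reset steps, with hypothetical names:

#include <linux/kernel.h>

struct dirty_rect { int x1, y1, x2, y2; };

static void dirty_union(const struct dirty_rect *acc,
			int *x1, int *y1, int *x2, int *y2)
{
	/* grow the incoming rect to also cover the stored damage */
	if (acc->x1 < *x1) *x1 = acc->x1;
	if (acc->y1 < *y1) *y1 = acc->y1;
	if (acc->x2 > *x2) *x2 = acc->x2;
	if (acc->y2 > *y2) *y2 = acc->y2;
}

static void dirty_reset(struct dirty_rect *acc)
{
	acc->x1 = acc->y1 = INT_MAX;	/* empty rect */
	acc->x2 = acc->y2 = 0;
}]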
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731a611..09da3393c52 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc724e5a..7ca05959688 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
struct list_head fbdev_list;
void *sysram;
int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index e25afccaf85..3541b567bbd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = afbdev->gfb.obj;
bo = gem_to_cirrus_bo(obj);
+ /*
+ * Try to reserve the BO; if that fails with -EBUSY,
+ * the BO is being moved and we should store up
+ * the damage until later.
+ */
ret = cirrus_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}
+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
cdev->mode_info.gfbdev = gfbdev;
gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+ spin_lock_init(&gfbdev->dirty_lock);
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26e490..2ed8cfc740c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index dd64a06dc5b..016c5d8b466 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ /* Locking is currently fubar in the panic handler. */
+ if (oops_in_progress)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index af779ae19eb..cf919e36e8a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
if (obj->import_attach) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
if (obj->export_dma_buf) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->export_dma_buf);
}
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a6a8643a6a7..8bcce7866d3 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
- /* vblank is not initialized (IRQ not installed ?) */
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
/*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d..07cf99cc886 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
- struct drm_mm_node *entry;
- unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
- hole_start = drm_mm_hole_node_start(&mm->head_node);
- hole_end = drm_mm_hole_node_end(&mm->head_node);
- hole_size = hole_end - hole_start;
- if (hole_size)
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);
- total_free += hole_size;
+ return hole_size;
+ }
+
+ return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+ struct drm_mm_node *entry;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_dump_hole(m, &mm->head_node);
drm_mm_for_each_node(entry, mm) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
- if (entry->hole_follows) {
- hole_start = drm_mm_hole_node_start(entry);
- hole_end = drm_mm_hole_node_end(entry);
- hole_size = hole_end - hole_start;
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
- hole_start, hole_end, hole_size);
- total_free += hole_size;
- }
+ total_free += drm_mm_dump_hole(m, entry);
}
total = total_free + total_used;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 366910ddcfc..db767cae5f2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,6 +62,7 @@ struct drm_prime_member {
struct dma_buf *dma_buf;
uint32_t handle;
};
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
- int ret;
+ int ret = 0;
+ struct dma_buf *dmabuf;
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
/* re-export the original imported object */
if (obj->import_attach) {
- get_dma_buf(obj->import_attach->dmabuf);
- *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return 0;
+ dmabuf = obj->import_attach->dmabuf;
+ goto out_have_obj;
}
if (obj->export_dma_buf) {
- get_dma_buf(obj->export_dma_buf);
- *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
- drm_gem_object_unreference_unlocked(obj);
- } else {
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
- /* normally the created dma-buf takes ownership of the ref,
- * but if that fails then drop the ref
- */
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return PTR_ERR(buf);
- }
- obj->export_dma_buf = buf;
- *prime_fd = dma_buf_fd(buf, flags);
+ dmabuf = obj->export_dma_buf;
+ goto out_have_obj;
}
+
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(buf);
+ goto out;
+ }
+ obj->export_dma_buf = buf;
+
+ /* if we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back
*/
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return ret;
- }
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret)
+ goto out;
+ *prime_fd = dma_buf_fd(buf, flags);
mutex_unlock(&file_priv->prime.lock);
return 0;
+
+out_have_obj:
+ get_dma_buf(dmabuf);
+ *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
return obj;
}
}
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_PTR(PTR_ERR(attach));
+ get_dma_buf(dma_buf);
+
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
- ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
if (!ret) {
ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
if (ret)
goto out_put;
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
if (ret)
goto fail;
mutex_unlock(&file_priv->prime.lock);
+
+ dma_buf_put(dma_buf);
+
return 0;
fail:
@@ -491,7 +500,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -499,14 +508,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
if (!member)
return -ENOMEM;
+ get_dma_buf(dma_buf);
member->dma_buf = dma_buf;
member->handle = handle;
list_add(&member->entry, &prime_fpriv->head);
return 0;
}
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
struct drm_prime_member *member;
@@ -518,19 +527,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
}
return -ENOENT;
}
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
struct drm_prime_member *member, *safe;
mutex_lock(&prime_fpriv->lock);
list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
list_del(&member->entry);
kfree(member);
}
}
mutex_unlock(&prime_fpriv->lock);
}
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
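[Besides the renames, the drm_prime changes above fix the reference accounting: each (dma_buf, handle) entry in the per-file list now owns one dma_buf reference, taken in drm_prime_add_buf_handle() and dropped in drm_prime_remove_buf_handle(), and the import paths take their own reference before mapping the attachment. A hedged sketch of that invariant, with a local stand-in for struct drm_prime_member:

#include <linux/dma-buf.h>
#include <linux/types.h>

struct prime_entry {		/* stand-in for struct drm_prime_member */
	struct dma_buf *dma_buf;
	u32 handle;
};

static void entry_init(struct prime_entry *e, struct dma_buf *buf, u32 handle)
{
	get_dma_buf(buf);	/* the list entry owns one reference */
	e->dma_buf = buf;
	e->handle = handle;
}

static void entry_fini(struct prime_entry *e)
{
	dma_buf_put(e->dma_buf);	/* balances the get in entry_init() */
}]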
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ba0a3aa7854..ff7f2a886a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
return obj;
}
}
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (IS_ERR(attach))
return ERR_PTR(-EINVAL);
+ get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2590cac8425..8d070781e0e 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -772,8 +772,8 @@ void psb_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++)
psb_intel_crtc_init(dev, i, mode_dev);
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
psb_setup_outputs(dev);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf3f03..029eccf3013 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
- if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
dsp_int = 1;
/* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9b57893db2..49393e584a8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -359,40 +359,64 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+ INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+ INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+ INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+ INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+ INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+ INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+ INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 01769e2a995..ef99b1c22fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -941,6 +941,7 @@ typedef struct drm_i915_private {
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e207e6e0df..db2cff319c8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error))
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
- /* GPU is already declared terminally dead, give up. */
- if (i915_terminally_wedged(error))
- return -EIO;
-
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
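[The EXIT_COND rework above folds the terminally-wedged case into the wait condition itself, so the waiter wakes for either outcome instead of special-casing a dead GPU before sleeping. A generic sketch of that pattern, with hypothetical flags:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t op_in_progress = ATOMIC_INIT(0);
static atomic_t op_wedged = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(op_wq);

/* wake when the operation finished OR became unrecoverable */
#define DONE_COND (!atomic_read(&op_in_progress) || atomic_read(&op_wedged))

static int wait_for_op(unsigned long timeout)
{
	long ret = wait_event_interruptible_timeout(op_wq, DONE_COND, timeout);

	if (ret == 0)
		return -EBUSY;		/* timed out */
	return ret < 0 ? ret : 0;	/* interrupted, or condition met */
}]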
@@ -2678,17 +2675,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs;
}
+static void i915_gem_write_fence__ipi(void *data)
+{
+ wbinvd();
+}
+
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg = fence_number(dev_priv, fence);
+
+ /* In order to fully serialize access to the fenced region and
+ * the update to the fence register we need to take extreme
+ * measures on SNB+. In theory, the write to the fence register
+ * flushes all memory transactions before, and coupled with the
+ * mb() placed around the register write we serialise all memory
+ * operations with respect to the changes in the tiler. Yet, on
+ * SNB+ we need to take a step further and emit an explicit wbinvd()
+ * on each processor in order to manually flush all memory
+ * transactions before updating the fence register.
+ */
+ if (HAS_LLC(obj->base.dev))
+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
if (enable) {
- obj->fence_reg = reg;
+ obj->fence_reg = fence_reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
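[The fence update above relies on the kernel's cross-CPU call helper: on_each_cpu() sends an IPI to every processor and, with the last argument set to 1, waits until the callback (here wbinvd) has run everywhere before the fence register is touched. A minimal x86-only sketch of that pattern:

#include <linux/smp.h>
#include <asm/special_insns.h>

static void flush_caches_ipi(void *unused)
{
	wbinvd();	/* write back and invalidate this CPU's caches */
}

static void flush_caches_everywhere(void)
{
	/* wait == 1: return only after all CPUs ran the callback */
	on_each_cpu(flush_caches_ipi, NULL, 1);
}]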
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 94d873a6cff..a1e8ecb6adf 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_LLC_MLC);
+ if (ret)
+ goto err_out;
+ }
+
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af682862..c303de1034c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -271,7 +271,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(&obj->base);
- dma_buf_put(dma_buf);
return &obj->base;
}
}
@@ -281,6 +280,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
+ get_dma_buf(dma_buf);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
@@ -300,5 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 926a1e2dd23..193c8d1088e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -182,8 +182,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt =
- gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->clear_range = gen6_ppgtt_clear_range;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 848992f67d5..c91124f925d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3827,7 +3827,7 @@
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1f581..bd833918c49 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
+ dev_priv->display_clock_mode,
+ dev_priv->fdi_rx_polarity_inverted);
}
}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f93437..e088d6f0956 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -127,7 +127,9 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b20d50192fc..2ab65b48449 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7589,22 +7589,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
if (crtc->enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
- /* We only support modeset on one single crtc, hence we need to do that
- * only for the passed in crtc iff we change anything else than just
- * disable crtcs.
- *
- * This is actually not true, to be fully compatible with the old crtc
- * helper we automatically disable _any_ output (i.e. doesn't need to be
- * connected to the crtc we're modesetting on) if it's disconnected.
- * Which is a rather nutty api (since changed the output configuration
- * without userspace's explicit request can lead to confusion), but
- * alas. Hence we currently need to modeset on all pipes we prepare. */
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
if (*prepare_pipes)
*modeset_pipes = *prepare_pipes;
/* ... and mask these out. */
*modeset_pipes &= ~(*disable_pipes);
*prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+ * obeys this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
}
static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7771,9 +7774,9 @@ intel_modeset_check_state(struct drm_device *dev)
}
}
-int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int __intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7863,8 +7866,6 @@ done:
if (ret && crtc->enabled) {
crtc->hwmode = *saved_hwmode;
crtc->mode = *saved_mode;
- } else {
- intel_modeset_check_state(dev);
}
out:
@@ -7872,6 +7873,20 @@ out:
return ret;
}
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ int ret;
+
+ ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+ if (ret == 0)
+ intel_modeset_check_state(crtc->dev);
+
+ return ret;
+}
+
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
@@ -7945,6 +7960,21 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
}
+static bool
+is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
+ int num_connectors)
+{
+ int i;
+
+ for (i = 0; i < num_connectors; i++)
+ if (connectors[i].encoder &&
+ connectors[i].encoder->crtc == crtc &&
+ connectors[i].dpms != DRM_MODE_DPMS_ON)
+ return true;
+
+ return false;
+}
+
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
struct intel_set_config *config)
@@ -7952,7 +7982,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
+ if (set->connectors != NULL &&
+ is_crtc_connector_off(set->crtc, *set->connectors,
+ set->num_connectors)) {
+ config->mode_changed = true;
+ } else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
@@ -7964,8 +7998,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
} else if (set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) {
config->mode_changed = true;
- } else
+ } else {
config->fb_changed = true;
+ }
}
if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
@@ -8314,7 +8349,7 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!IS_ULT(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -9172,8 +9207,16 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
if (force_restore) {
+ /*
+ * We need to use raw interfaces for restoring state to avoid
+ * checking (bogus) intermediate states.
+ */
for_each_pipe(pipe) {
- intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct drm_crtc *crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+
+ __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->fb);
}
i915_redisable_vga(dev);
@@ -9236,6 +9279,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
+
drm_mode_config_cleanup(dev);
intel_cleanup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8fc93f90a7c..b8e17e56401 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2538,17 +2538,14 @@ done:
static void
intel_dp_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- if (is_edp(intel_dp)) {
- intel_panel_destroy_backlight(dev);
+ if (is_edp(intel_dp))
intel_panel_fini(&intel_connector->panel);
- }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 00e70dbe82d..cc70b16d5d4 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -448,6 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
+ bool dvoinit;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -467,7 +468,17 @@ void intel_dvo_init(struct drm_device *dev)
i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 981bdce3634..898832b9ee9 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -261,10 +261,22 @@ void intel_fbdev_fini(struct drm_device *dev)
void intel_fbdev_set_suspend(struct drm_device *dev, int state)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct fb_info *info;
+
+ if (!ifbdev)
return;
- fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+ info = ifbdev->helper.fbdev;
+
+ /* On resume from hibernation: If the object is shmemfs backed, it has
+ * been restored from swap. If the object is stolen however, it will be
+ * full of whatever garbage was left in there.
+ */
+ if (!state && ifbdev->ifb.obj->stolen)
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ fb_set_suspend(info, state);
}
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3d1d97488cc..e2ca9c1e6c5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -618,7 +618,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
- intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel);
drm_sysfs_connector_remove(connector);
@@ -804,10 +803,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .ident = "Hewlett-Packard HP t5740",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
},
},
{
@@ -850,6 +849,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a..94d895b665d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -422,6 +422,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
intel_panel_init_backlight(dev);
+ if (WARN_ON(dev_priv->backlight))
+ return -ENODEV;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -447,8 +450,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
void intel_panel_destroy_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight)
+ if (dev_priv->backlight) {
backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
}
#else
int intel_panel_setup_backlight(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index adca00783e6..332b29e5641 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3562,6 +3562,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -3574,8 +3575,12 @@ static void cpt_init_clock_gating(struct drm_device *dev)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ for_each_pipe(pipe) {
+ val = TRANS_CHICKEN2_TIMING_OVERRIDE;
+ if (dev_priv->fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d07a8cdf998..cdd78ca695c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1235,11 +1235,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs;
u32 tmp;
tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE))
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
if (HAS_PCH_CPT(dev))
@@ -1768,7 +1770,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -2739,7 +2741,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
-
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4d932c46725..8065919f9a6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -115,6 +115,8 @@ struct mga_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};
struct mga_crtc {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d2253f63948..b0dad27cb6a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
obj = mfbdev->mfb.obj;
bo = gem_to_mga_bo(obj);
+ /*
+ * Try to reserve the BO; if that fails with -EBUSY,
+ * the BO is being moved and we should store up
+ * the damage until later.
+ */
ret = mgag200_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+ if (mfbdev->y1 < y)
+ y = mfbdev->y1;
+ if (mfbdev->y2 > y2)
+ y2 = mfbdev->y2;
+ if (mfbdev->x1 < x)
+ x = mfbdev->x1;
+ if (mfbdev->x2 > x2)
+ x2 = mfbdev->x2;
+
+ if (store_for_later) {
+ mfbdev->x1 = x;
+ mfbdev->x2 = x2;
+ mfbdev->y1 = y;
+ mfbdev->y2 = y2;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
return;
}
+ mfbdev->x1 = mfbdev->y1 = INT_MAX;
+ mfbdev->x2 = mfbdev->y2 = 0;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
}
if (unmap)
@@ -255,6 +291,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
mdev->mfbdev = mfbdev;
mfbdev->helper.funcs = &mga_fb_helper_funcs;
+ spin_lock_init(&mfbdev->dirty_lock);
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 78d8e919509..407e2576c80 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -189,12 +189,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
/* select PLL Set C */
tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +204,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
@@ -212,7 +212,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_VREF_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~0x04;
- WREG_DAC(MGA1064_VREF_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(50);
@@ -236,13 +236,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
/* reset dotclock rate bit */
WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +253,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
vcount = RREG8(MGAREG_VCOUNT);
@@ -318,7 +318,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= 0x3 << 2;
@@ -326,12 +326,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
tmp = RREG8(DAC_DATA);
- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+ WREG8(DAC_DATA, tmp & ~0x40);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +342,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
@@ -350,11 +350,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
tmp = RREG8(DAC_DATA);
- WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+ WREG8(DAC_DATA, tmp | 0x40);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= (0x3 << 2);
@@ -363,7 +363,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
return 0;
}
@@ -416,7 +416,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= 0x3 << 2;
@@ -425,7 +425,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
@@ -439,13 +439,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
vcount = RREG8(MGAREG_VCOUNT);
@@ -515,12 +515,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+ WREG8(DAC_DATA, tmp);
WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
tmp = RREG8(DAC_DATA);
tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ WREG8(DAC_DATA, tmp);
tmp = RREG8(MGAREG_MEM_MISC_READ);
tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +530,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
tmp = RREG8(DAC_DATA);
tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ WREG8(DAC_DATA, tmp);
udelay(500);
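[The WREG_DAC -> WREG8 substitutions in the hunks above all fix the same indexed-register bug: WREG_DAC(reg, val) selects DAC_INDEX again before writing DAC_DATA, which clobbers the index that the surrounding read-modify-write sequence already set up (in several of the replaced calls the first argument was not even a register index). A hedged sketch of the corrected sequence, with hypothetical MMIO helpers; the register offsets are taken to match mgag200_drv.h:

#include <linux/io.h>
#include <linux/types.h>

#define DAC_INDEX 0x3c00	/* assumed, per mgag200_drv.h */
#define DAC_DATA  0x3c0a

static u8 dac_rmw(void __iomem *mmio, u8 index, u8 set, u8 clear)
{
	u8 tmp;

	writeb(index, mmio + DAC_INDEX);	/* select the register once */
	tmp = readb(mmio + DAC_DATA);
	tmp = (tmp & ~clear) | set;
	writeb(tmp, mmio + DAC_DATA);		/* data only -- keep the index */
	return tmp;
}]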
@@ -657,12 +657,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
-
+/*
+ This is how the framebuffer base address is stored in g200 cards:
+ * Assume @offset is the gpu_addr variable of the framebuffer object
+ * Then addr is the number of _pixels_ (not bytes) from the start of
+ VRAM to the first pixel we want to display. (divided by 2 for 32bit
+ framebuffers)
+ * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+ addr<20> -> CRTCEXT0<6>
+ addr<19-16> -> CRTCEXT0<3-0>
+ addr<15-8> -> CRTCC<7-0>
+ addr<7-0> -> CRTCD<7-0>
+ CRTCEXT0 has to be programmed last to trigger an update and make the
+ new addr variable take effect.
+ */
void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct mga_device *mdev = crtc->dev->dev_private;
u32 addr;
int count;
+ u8 crtcext0;
while (RREG8(0x1fda) & 0x08);
while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +684,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
count = RREG8(MGAREG_VCOUNT) + 2;
while (RREG8(MGAREG_VCOUNT) < count);
- addr = offset >> 2;
+ WREG8(MGAREG_CRTCEXT_INDEX, 0);
+ crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+ crtcext0 &= 0xB0;
+ addr = offset / 8;
+ /* Can't store addresses any higher than that...
+ but we also don't have more than 16MB of memory, so it should be fine. */
+ WARN_ON(addr > 0x1fffff);
+ crtcext0 |= (!!(addr & (1<<20)))<<6;
WREG_CRT(0x0d, (u8)(addr & 0xff));
WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
- WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+ WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
}
@@ -1020,13 +1041,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
else
hi_pri_lvl = 5;
- WREG8(0x1fde, 0x06);
- WREG8(0x1fdf, hi_pri_lvl);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
} else {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
if (mdev->reg_1e24 >= 0x01)
- WREG8(0x1fdf, 0x03);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x03);
else
- WREG8(0x1fdf, 0x04);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x04);
}
}
return 0;
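[A worked example of the start-address packing described in the comment block above (illustrative userspace code; the offset value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset = 0x00300000;		/* hypothetical byte offset */
	uint32_t addr = offset / 8;		/* as in mga_set_start_address() */
	uint8_t crtcd = addr & 0xff;		/* addr<7-0>   -> CRTCD<7-0>    */
	uint8_t crtcc = (addr >> 8) & 0xff;	/* addr<15-8>  -> CRTCC<7-0>    */
	uint8_t ext0  = ((addr >> 16) & 0xf) |	/* addr<19-16> -> CRTCEXT0<3-0> */
			((!!(addr & (1 << 20))) << 6);	/* addr<20> -> CRTCEXT0<6> */

	printf("CRTCD=0x%02x CRTCC=0x%02x CRTCEXT0=0x%02x\n",
	       crtcd, crtcc, ext0);
	return 0;
}

For offset 0x00300000 this prints CRTCD=0x00 CRTCC=0x00 CRTCEXT0=0x06; in the driver the CRTCEXT0 bits are merged with the preserved bits read back from the register.]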
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d920194..401c9891d3a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("reserve failed %p\n", bo);
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p %d\n", bo, ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9c41b58d57e..ad6335f3f1f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1926,8 +1926,8 @@ init_zm_mask_add(struct nvbios_init *init)
trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
init->offset += 13;
- data = init_rd32(init, addr) & mask;
- data |= ((data + add) & ~mask);
+ data = init_rd32(init, addr);
+ data = (data & mask) | ((data + add) & ~mask);
init_wr32(init, addr, data);
}
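[The init_zm_mask_add fix above matters because the old code added the constant to the already-masked value, losing the bits outside the mask. With raw value r, mask m and addend a, the opcode's intent is:

	new = (r & m) | ((r + a) & ~m)

A small worked check with illustrative values r = 0xff, m = 0xf0, a = 0x01: the old code computed (r & m) | (((r & m) + a) & ~m) = 0xf0 | (0xf1 & 0x0f) = 0xf1, whereas the corrected form gives 0xf0 | (0x100 & 0x0f) = 0xf0, preserving the masked field.]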
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 4393eb4d656..2391b1b3859 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
@@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ac74d1bc67b..1bdf7e1c379 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buffer);
return obj;
}
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c377285..fb441a790f3 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c6ab9..6d6fdb3ba0d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4552d4aff31..6cf2b52a56a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657bf21..f8e36ac2c05 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -105,6 +105,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
@@ -115,21 +136,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
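
The rewritten dce4_wait_for_vblank() above replaces the fixed microsecond timeout with a two-phase wait — first let any vblank already in progress finish, then wait for the next one to start — and bails out whenever the position counter stops moving, since a halted CRTC would otherwise spin forever. A condensed sketch of that pattern, with in_vblank()/counter_moving() as stand-ins for the CRTC status and position register reads:

#include <stdbool.h>

extern bool in_vblank(int crtc);        /* CRTC status register */
extern bool counter_moving(int crtc);   /* do two position reads differ? */

static void wait_for_vblank(int crtc)
{
        unsigned int i = 0;

        /* if we woke up inside a vblank, let it finish first */
        while (in_vblank(crtc))
                if (i++ % 100 == 0 && !counter_moving(crtc))
                        return;         /* CRTC halted; stop spinning */

        /* now wait for the start of the next vblank */
        while (!in_vblank(crtc))
                if (i++ % 100 == 0 && !counter_moving(crtc))
                        return;
}
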
@@ -608,6 +636,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS: it breaks the
+ * aux dp channel on iMacs. Skipping it helps (but does not
+ * completely fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * and also avoids interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +1363,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1347,6 +1384,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1364,6 +1410,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +1447,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
@@ -2311,8 +2400,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r700_vram_gtt_location(rdev, &rdev->mc);
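
CONFIG_MEMSIZE reports VRAM in megabytes and RREG32() returns a 32-bit value, so val * 1024 * 1024 is evaluated in 32-bit arithmetic and wraps to zero for boards with 4 GB or more; the ULL suffixes promote the whole expression to 64 bits before it can overflow. A tiny demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t size_mb = 4096;                     /* a 4 GB board */

        uint64_t bad  = size_mb * 1024 * 1024;       /* 32-bit multiply wraps to 0 */
        uint64_t good = size_mb * 1024ULL * 1024ULL; /* promoted to 64 bits first */

        printf("wrapped %llu, correct %llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}
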
@@ -3639,6 +3728,12 @@ static int evergreen_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3787,10 +3882,6 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -3855,8 +3946,7 @@ void evergreen_fini(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
- u32 link_width_cntl, speed_cntl, mask;
- int ret;
+ u32 link_width_cntl, speed_cntl;
if (radeon_pcie_gen2 == 0)
return;
@@ -3871,11 +3961,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f585be16e2d..881aba23c47 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -226,6 +226,8 @@
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 27769e724b6..d7968b8c92e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -473,7 +473,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +483,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -621,6 +623,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
@@ -1707,6 +1711,12 @@ static int cayman_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -1853,10 +1863,6 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 079dee202a9..445b235c432 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9db58530be3..d0314ecbd7c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* and others in some cases.
*/
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ } else {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 vline1, vline2;
+
+ if (crtc == 0) {
+ vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ } else {
+ vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ }
+ if (vline1 != vline2)
+ return true;
+ else
+ return false;
+}
+
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
if (crtc == 0) {
- if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
+ if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+ return;
} else {
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
- break;
- udelay(1);
- }
+ if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+ return;
+ }
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
}
}
}
@@ -3840,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -3995,9 +4030,6 @@ int r100_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c60350e6872..b9b776f1e58 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 865e2c9980d..60170ea5e3a 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for (i = 0; i < nr; ++i) {
- if (DRM_COPY_FROM_USER_UNCHECKED
+ if (DRM_COPY_FROM_USER
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return -EFAULT;
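
DRM_COPY_FROM_USER_UNCHECKED maps to __copy_from_user(), which skips the access_ok() range check and trusts that the caller already validated the user pointer; the cliprect path never did, so a malicious ioctl argument could point into kernel memory. Switching to the checked DRM_COPY_FROM_USER (plain copy_from_user()) closes that hole. The general rule, sketched — note that kernels of this era spell the check access_ok(VERIFY_READ, ptr, len); the two-argument form below is the modern one:

        /* only use the unchecked __ variant after an explicit
         * access_ok() on the whole user range
         */
        if (!access_ok(uptr, len))
                return -EFAULT;
        if (__copy_from_user(kbuf, uptr, len))
                return -EFAULT;

        /* or simply let copy_from_user() do the check itself */
        if (copy_from_user(kbuf, uptr, len))
                return -EFAULT;
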
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6fce2eb4dd1..4e796ecf9ea 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
if (r) {
return r;
}
- r = radeon_irq_kms_init(rdev);
- if (r) {
- return r;
- }
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index c0dc8d3ba0b..1dd0d32993d 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -358,7 +358,9 @@
#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f795a4e092c..e1aece73b37 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0740db3fcd2..bac1e2ac2ad 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2940,6 +2940,12 @@ static int r600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3094,10 +3100,6 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -4351,8 +4353,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
- u32 mask;
- int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -4371,11 +4371,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (rdev->family <= CHIP_R600)
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21ecc0e12dc..85208336cbc 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -433,7 +433,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
offset = dig->afmt->offset;
/* Older chipsets require setting HDMI and routing manually */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -501,7 +501,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
/* Older chipsets not handled by AtomBIOS */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb571352..96168ef4ab1 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
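
The power-table rework above stops indexing state_array->states[] as a fixed-stride array: each v2 power state is variable length (two fixed bytes plus one clockInfoIndex byte per DPM level), so the parser now walks a byte pointer and advances it by 2 + ucNumDPMLevels per entry, and trusts nonClockInfoIndex again instead of the old inagua workaround. The walking pattern, reduced to its core (the struct layout here is illustrative, not the full ATOM definition):

#include <stdint.h>

struct state_v2 {                       /* 2 fixed bytes + variable tail */
        uint8_t num_dpm_levels;
        uint8_t non_clock_info_index;
        uint8_t clock_info_index[];     /* num_dpm_levels entries */
};

static void walk_states(uint8_t *p, unsigned int nr_states)
{
        for (unsigned int i = 0; i < nr_states; i++) {
                struct state_v2 *s = (struct state_v2 *)p;
                /* ... consume s->clock_info_index[0..num_dpm_levels-1] ... */
                p += 2 + s->num_dpm_levels;   /* header plus variable tail */
        }
}
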
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 44b8034a400..5073665d161 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -435,18 +435,17 @@ bool radeon_card_posted(struct radeon_device *rdev)
return false;
/* first check CRTCs */
- if (ASIC_IS_DCE41(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (reg & EVERGREEN_CRTC_MASTER_EN)
- return true;
- } else if (ASIC_IS_DCE4(rdev)) {
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
} else if (ASIC_IS_AVIVO(rdev)) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66a7f0fd962..96cf43901b5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -144,7 +144,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
#endif
int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
@@ -449,6 +449,16 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && radeon_modeset == -1) {
+ DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+ radeon_modeset = 0;
+ }
+#endif
+ /* default to kernel modesetting unless nomodeset was given */
+ if (radeon_modeset == -1)
+ radeon_modeset = 1;
+
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c75cb2c6ba7..c5b2765cbf1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+ if (rdev->rmmio == NULL)
+ goto done_free;
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+
+done_free:
kfree(rdev);
dev->dev_private = NULL;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a74e8..788c64cb4b4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d58e268ff6..1ef5eaac6ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
- if (ib->vm && !ib->vm->last_flush) {
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 93f760e27a9..6c0ce8915fa 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
- (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+ (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
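
The DRM_INFO change above is a precedence fix: a cast binds tighter than division, so (unsigned)rdev->mc.real_vram_size / (1024 * 1024) truncated the 64-bit VRAM size to 32 bits before dividing and misreported boards with 4 GB or more, while the parenthesized form divides in 64 bits and only then narrows. Demonstrated in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t vram = 4ULL * 1024 * 1024 * 1024;        /* 4 GB */

        unsigned bad  = (unsigned)vram / (1024 * 1024);   /* truncates first: 0 */
        unsigned good = (unsigned)(vram / (1024 * 1024)); /* divides first: 4096 */

        printf("bad %uM, good %uM\n", bad, good);
        return 0;
}
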
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 73051ce3121..233a9b9fa1f 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74c2ba..670b555d2ca 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ }
+
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
@@ -887,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1011,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5706d2ac75a..fad6633dce3 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -628,6 +628,12 @@ static int rs690_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -753,9 +759,6 @@ int rs690_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed355136..21c7d7b26e5 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_EN;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->family >= CHIP_RV770) {
- if (i == 1) {
+ if (i == 0) {
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
upper_32_bits(rdev->mc.vram_start));
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
}
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
if (rdev->family >= CHIP_R600) {
/* unblackout the MC */
if (rdev->family >= CHIP_RV770)
@@ -478,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -608,9 +668,6 @@ int rv515_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d63fe1d0f53..c872d2b7187 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1041,6 +1041,12 @@ static int rv770_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -1180,10 +1186,6 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -1238,8 +1240,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
- u32 mask;
- int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -1254,11 +1254,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bafbe321695..a964a967959 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1463,7 +1463,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
u32 data = INSTANCE_BROADCAST_WRITES;
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
else if (se_num == 0xffffffff)
data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
else if (sh_num == 0xffffffff)
@@ -1645,7 +1645,7 @@ static void si_gpu_init(struct radeon_device *rdev)
default:
rdev->config.si.max_shader_engines = 1;
rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_cu_per_sh = 2;
+ rdev->config.si.max_cu_per_sh = 5;
rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
@@ -1765,6 +1765,7 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
@@ -2643,8 +2644,8 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -4373,6 +4374,12 @@ static int si_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = si_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4533,10 +4540,6 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 23fc08fc8e7..f84cff0aafc 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -65,6 +65,8 @@
#define DMIF_ADDR_CONFIG 0xBD4
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c5b592dc197..bfac5827c9b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev)
mod->funcs->modeset_init(mod, dev);
}
- if ((priv->num_encoders = 0) || (priv->num_connectors == 0)) {
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
/* oh nos! */
dev_err(dev->dev, "no encoders/connectors found\n");
return -ENXIO;
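
The tilcdc hunk is the classic = vs == slip: priv->num_encoders = 0 assigns zero (destroying the count) and evaluates to 0, so the left side of the || could never trigger. Modern compilers flag this with -Wparentheses; a minimal reproduction:

#include <stdio.h>

int main(void)
{
        int num_encoders = 3, num_connectors = 1;

        if ((num_encoders = 0) || (num_connectors == 0))  /* -Wparentheses warns */
                printf("never reached\n");

        printf("num_encoders is now %d\n", num_encoders); /* prints 0 */
        return 0;
}
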
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3816270ba49..ef034fa3e6f 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
+ get_dma_buf(dma_buf);
+
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
@@ -322,5 +324,7 @@ fail_unmap:
dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
return ERR_PTR(ret);
}
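
The udl change balances dma-buf reference counting across the import path: an importer must hold its own reference on the dma_buf for as long as its attachment lives, so it now takes get_dma_buf() right after attaching and drops dma_buf_put() on the error path (teardown is expected to drop the reference symmetrically). A condensed sketch of the contract, with error labels and surrounding types elided:

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);            /* importer now owns a reference */

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                dma_buf_detach(dma_buf, attach);
                dma_buf_put(dma_buf);    /* undo our reference on failure */
                return ERR_CAST(sg);
        }
        /* on success, keep the reference until the object dies, then
         * unmap, detach and dma_buf_put() in that order
         */
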
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index aa341d13586..e6dbf092adc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1702,6 +1702,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ff1be167eb0..421d6078d81 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -318,7 +318,7 @@ static u32 get_vp_index(uuid_le *type_guid)
return 0;
}
cur_cpu = (++next_vp % max_cpus);
- return cur_cpu;
+ return hv_context.vp_index[cur_cpu];
}
/*
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cafa72ffdc3..d6fbb5772b8 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -71,6 +71,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
+ smp_mb();
if (rbi->ring_buffer->interrupt_mask)
return false;
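
The hv fix inserts smp_mb() so the write of the new ring-buffer write index is globally visible before the host's interrupt_mask is read; without it the store and the load can be reordered and both sides may each conclude "no signal needed", losing the wakeup. This store-then-load ordering is the one case plain smp_wmb()/smp_rmb() cannot fix. A userspace analogue with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

_Atomic unsigned int write_index;
_Atomic unsigned int interrupt_mask;

bool need_to_signal(unsigned int new_write)
{
        atomic_store_explicit(&write_index, new_write,
                              memory_order_relaxed);

        /* full fence: our store must be visible before we read the mask */
        atomic_thread_fence(memory_order_seq_cst);

        return atomic_load_explicit(&interrupt_mask,
                                    memory_order_relaxed) == 0;
}
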
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 6119ff8e8c8..f3b3488e5a5 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1411,14 +1411,18 @@ static int abituguru_probe(struct platform_device *pdev)
pr_info("found Abit uGuru\n");
/* Register sysfs hooks */
- for (i = 0; i < sysfs_attr_i; i++)
- if (device_create_file(&pdev->dev,
- &data->sysfs_attr[i].dev_attr))
+ for (i = 0; i < sysfs_attr_i; i++) {
+ res = device_create_file(&pdev->dev,
+ &data->sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
- if (device_create_file(&pdev->dev,
- &abituguru_sysfs_attr[i].dev_attr))
+ }
+ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+ res = device_create_file(&pdev->dev,
+ &abituguru_sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (!IS_ERR(data->hwmon_dev))
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 71bcba8abfc..1468be8e441 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -332,26 +332,68 @@ static int adm1021_detect(struct i2c_client *client,
man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
+ if (man_id < 0 || dev_id < 0)
+ return -ENODEV;
+
if (man_id == 0x4d && dev_id == 0x01)
type_name = "max1617a";
else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
- else
+ else if ((dev_id & 0xF0) == 0x00)
type_name = "adm1021";
+ else
+ return -ENODEV;
} else if (man_id == 0x49)
type_name = "thmc10";
else if (man_id == 0x23)
type_name = "gl523sm";
else if (man_id == 0x54)
type_name = "mc1066";
- /* LM84 Mfr ID in a different place, and it has more unused bits */
- else if (conv_rate == 0x00
- && (config & 0x7F) == 0x00
- && (status & 0xAB) == 0x00)
- type_name = "lm84";
- else
- type_name = "max1617";
+ else {
+ int lte, rte, lhi, rhi, llo, rlo;
+
+ /* extra checks for LM84 and MAX1617 to avoid misdetections */
+
+ llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
+ rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
+
+ /* fail if any of the additional register reads failed */
+ if (llo < 0 || rlo < 0)
+ return -ENODEV;
+
+ lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
+ rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
+ lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
+ rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
+
+ /*
+ * Fail for negative temperatures and negative high limits.
+ * This check also catches read errors on the tested registers.
+ */
+ if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
+ return -ENODEV;
+
+ /* fail if all registers hold the same value */
+ if (lte == rte && lte == lhi && lte == rhi && lte == llo
+ && lte == rlo)
+ return -ENODEV;
+
+ /*
+ * LM84 Mfr ID is in a different place,
+ * and it has more unused bits.
+ */
+ if (conv_rate == 0x00
+ && (config & 0x7F) == 0x00
+ && (status & 0xAB) == 0x00) {
+ type_name = "lm84";
+ } else {
+ /* fail if low limits are larger than high limits */
+ if ((s8)llo > lhi || (s8)rlo > rhi)
+ return -ENODEV;
+ type_name = "max1617";
+ }
+ }
pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 94fd8187540..2db362854d7 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -361,7 +361,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* Enable the adapter */
dw_writel(dev, 1, DW_IC_ENABLE);
- /* Enable interrupts */
+ /* Clear and enable interrupts */
+ i2c_dw_clear_int(dev);
dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
}
@@ -426,8 +427,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
cmd |= BIT(9);
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+ /* avoid rx buffer overrun */
+ if (rx_limit - dev->rx_outstanding <= 0)
+ break;
+
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
+ dev->rx_outstanding++;
} else
dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
@@ -480,8 +487,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)
rx_valid = dw_readl(dev, DW_IC_RXFLR);
- for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ dev->rx_outstanding--;
+ }
if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
@@ -539,6 +548,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev->msg_err = 0;
dev->status = STATUS_IDLE;
dev->abort_source = 0;
+ dev->rx_outstanding = 0;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9c1840ee09c..e761ad18dd6 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -60,6 +60,7 @@
* @adapter: i2c subsystem adapter node
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
*/
struct dw_i2c_dev {
struct device *dev;
@@ -88,6 +89,7 @@ struct dw_i2c_dev {
u32 master_cfg;
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
+ int rx_outstanding;
};
#define ACCESS_SWAP 0x00000001
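
The designware changes above add bookkeeping for a real hardware hazard: every read command queued into the TX FIFO eventually produces one byte in the RX FIFO, so the driver must cap read commands in flight at the RX FIFO depth or data is silently lost. rx_outstanding counts commands issued minus bytes drained. The accounting, reduced to a sketch with stand-in helpers for the FIFO registers:

#include <stdbool.h>
#include <stdint.h>

extern void issue_read_cmd(void);        /* stand-in for DW_IC_DATA_CMD writes */
extern void issue_write_cmd(uint8_t b);

static int rx_outstanding;               /* read cmds issued - bytes drained */

static void fill_tx_fifo(const uint8_t *buf, int buf_len,
                         int tx_limit, int rx_limit, bool is_read)
{
        while (buf_len > 0 && tx_limit > 0) {
                if (is_read) {
                        if (rx_limit - rx_outstanding <= 0)
                                break;           /* RX FIFO would overrun */
                        issue_read_cmd();
                        rx_limit--;
                        rx_outstanding++;        /* one more byte owed to us */
                } else {
                        issue_write_cmd(*buf++);
                }
                tx_limit--;
                buf_len--;
        }
}

/* the RX drain path then retires one outstanding command per byte read */
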
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 332c720fb3f..3d0f0520c1b 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -312,10 +312,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
/* last message in transfer -> STOP */
data |= XIIC_TX_DYN_STOP_MASK;
dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
-
- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
- } else
- xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
}
}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a884252ac66..e76d4ace53f 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
(pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
- ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9)));
+ ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
st->regs[ADF4350_REG3] = pdata->r3_user_settings &
(ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index b289915b846..8ff1a2de1d9 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -279,7 +279,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
s64 raw64 = raw;
int ret;
- ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE);
+ ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
if (ret == 0)
raw64 += offset;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f..ed49ab345b6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
return 0;
}
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+ int ret = -ENOSYS;
+ if (user)
+ ret = alloc_oc_sq(rdev, sq);
+ if (ret)
+ ret = alloc_host_sq(rdev, sq);
+ return ret;
+}
+
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct c4iw_dev_ucontext *uctx)
{
@@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
goto free_sw_rq;
}
- if (user) {
- ret = alloc_oc_sq(rdev, &wq->sq);
- if (ret)
- goto free_hwaddr;
-
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_sq;
- } else
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_hwaddr;
+ ret = alloc_sq(rdev, &wq->sq, user);
+ if (ret)
+ goto free_hwaddr;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb04b08..68ebb7fe072 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
for (i=0 ; i<ib_conn->page_vec->length ; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long) ib_conn->page_vec->pages[i]);
- return err;
}
+ if (err)
+ return err;
}
return 0;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b1a2f..b4a76d18a4f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch)
}
/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+ struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (ch->in_shutdown) {
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return true;
+ }
+
+ ch->in_shutdown = true;
+ target_sess_cmd_list_set_waiting(se_sess);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ return true;
+}
+
+/**
* srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
* @cm_id: Pointer to the CM ID of the channel to be drained.
*
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
spin_unlock_irq(&sdev->spinlock);
if (do_reset) {
+ if (ch->sess)
+ srpt_shutdown_session(ch->sess);
+
ret = srpt_ch_qp_err(ch);
if (ret < 0)
printk(KERN_ERR "Setting queue pair in error state"
@@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
}
/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return true;
-}
-
-/**
* srpt_close_session() - Forcibly close a session.
*
* Callback function invoked by the TCM core to clean up sessions associated
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 4caf55cda7b..3dae156905d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -325,6 +325,7 @@ struct srpt_rdma_ch {
u8 sess_name[36];
struct work_struct release_work;
struct completion *release_done;
+ bool in_shutdown;
};
/**
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 17c9097f3b5..39f3df8670c 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client,
input_set_abs_params(input_dev,
ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
input_set_abs_params(input_dev,
- ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0);
+ ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0);
input_set_drvdata(input_dev, ts);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b287ca33833..1a5285b8c0e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -700,11 +700,23 @@ retry:
static void iommu_poll_events(struct amd_iommu *iommu)
{
- u32 head, tail;
+ u32 head, tail, status;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
+ /* enable event interrupts again */
+ do {
+ /*
+ * Workaround for Erratum ERBT1312
+ * Clearing the EVT_INT bit may race in the hardware, so read
+ * it again and make sure it was really cleared
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ writel(MMIO_STATUS_EVT_INT_MASK,
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+ } while (status & MMIO_STATUS_EVT_INT_MASK);
+
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -741,16 +753,25 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
unsigned long flags;
- u32 head, tail;
+ u32 head, tail, status;
if (iommu->ppr_log == NULL)
return;
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
spin_lock_irqsave(&iommu->lock, flags);
+ /* enable ppr interrupts again */
+ do {
+ /*
+ * Workaround for Erratum ERBT1312
+ * Clearing the PPR_INT bit may race in the hardware, so read
+ * it again and make sure it was really cleared
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ writel(MMIO_STATUS_PPR_INT_MASK,
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+ } while (status & MMIO_STATUS_PPR_INT_MASK);
+
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
@@ -3947,6 +3968,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
if (!table)
goto out;
+ /* Initialize table spin-lock */
+ spin_lock_init(&table->lock);
+
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
table->min_index = 32;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e38ab438bb3..083f98c0488 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -99,6 +99,7 @@
#define PASID_MASK 0x000fffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
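
Both hunks above implement the same ERBT1312 workaround: a write-one-to-clear of the interrupt status bit can race with the hardware re-asserting it, so the driver re-reads the status register and repeats the clear until a read actually observes the bit low. A minimal sketch of the ack loop, reusing the register offset and mask names from this patch (the helper itself is hypothetical):

    /* Ack 'int_mask' in the IOMMU status register; retry until a read
     * confirms the bit really went away (Erratum ERBT1312). */
    static void iommu_ack_int(void __iomem *mmio_base, u32 int_mask)
    {
        u32 status;

        do {
            status = readl(mmio_base + MMIO_STATUS_OFFSET);
            writel(int_mask, mmio_base + MMIO_STATUS_OFFSET);
        } while (status & int_mask);    /* exit once a read saw it clear */
    }
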
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 04d86a9803f..b357c98ddc2 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fc6aebf1e4b..5533a88c96a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
@@ -38,12 +39,12 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
-#include <asm/mach/irq.h>
#include "irqchip.h"
@@ -252,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
+ raw_spin_lock(&irq_controller_lock);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
-
- raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
@@ -324,7 +324,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
if (unlikely(gic_irq < 32 || gic_irq > 1020))
- do_bad_IRQ(cascade_irq, desc);
+ handle_bad_irq(cascade_irq, desc);
else
generic_handle_irq(cascade_irq);
@@ -452,6 +452,12 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
+void gic_cpu_if_down(void)
+{
+ void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
+}
+
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
@@ -645,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
- unsigned long map = 0;
+ unsigned long flags, map = 0;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
@@ -659,9 +667,145 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send an SGI directly to the given CPU interface number
+ *
+ * @cpu_id: the ID of the destination CPU interface
+ * @irq: the IPI number to send an SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+ BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+ cpu_id = 1 << cpu_id;
+ /* this always happens on GIC0 */
+ writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+ unsigned int cpu_bit;
+
+ if (cpu >= NR_GIC_CPU_IF)
+ return -1;
+ cpu_bit = gic_cpu_map[cpu];
+ if (cpu_bit & (cpu_bit - 1))
+ return -1;
+ return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+ unsigned int old_cpu_id, gic_irqs, gic_nr = 0;
+ void __iomem *dist_base;
+ int i, ror_val, cpu = smp_processor_id();
+ u32 val, old_mask, active_mask;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ if (!dist_base)
+ return;
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+
+ old_cpu_id = __ffs(gic_cpu_map[cpu]);
+ old_mask = 0x01010101 << old_cpu_id;
+ ror_val = (old_cpu_id - new_cpu_id) & 31;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+ for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+ val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+ active_mask = val & old_mask;
+ if (active_mask) {
+ val &= ~active_mask;
+ val |= ror32(active_mask, ror_val);
+ writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
+ }
+ }
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ /*
+ * Now let's migrate and clear any potential SGIs that might be
+ * pending for us (old_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+ * is a banked register, we can only forward the SGI using
+ * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+ * doesn't use that information anyway.
+ *
+ * For the same reason we do not adjust SGI source information
+ * for SGIs we previously sent to other CPUs either.
+ */
+ for (i = 0; i < 16; i += 4) {
+ int j;
+ val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+ if (!val)
+ continue;
+ writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+ for (j = i; j < i + 4; j++) {
+ if (val & 0xff)
+ writel_relaxed((1 << (new_cpu_id + 16)) | j,
+ dist_base + GIC_DIST_SOFTINT);
+ val >>= 8;
+ }
+ }
+}
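
The rotation above works because GIC_DIST_TARGET packs one 8-bit CPU-targets field per interrupt, four fields per 32-bit word: 0x01010101 << old_cpu_id picks out the old interface's bit in all four fields at once, and ror32() by (old_cpu_id - new_cpu_id) & 31 moves every selected bit to the new interface's position within its own byte. A standalone demonstration of the mask arithmetic (no hardware access, values hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ror32(uint32_t word, unsigned int shift)
    {
        shift &= 31;
        return shift ? (word >> shift) | (word << (32 - shift)) : word;
    }

    int main(void)
    {
        unsigned int old_id = 0, new_id = 2;
        uint32_t targets = 0x01020401;          /* four per-IRQ target fields */
        uint32_t old_mask = 0x01010101u << old_id;
        uint32_t active = targets & old_mask;   /* fields routed to old_id */
        unsigned int ror_val = (old_id - new_id) & 31;

        targets = (targets & ~active) | ror32(active, ror_val);
        printf("0x%08x\n", targets);    /* 0x04020404: bit-0 fields now bit-2 */
        return 0;
    }
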
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+ if (!gic_dist_physaddr)
+ return 0;
+ return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+ struct resource res;
+ if (of_address_to_resource(node, 0, &res) == 0) {
+ gic_dist_physaddr = res.start;
+ pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+ }
+}
+
+#else
+#define gic_init_physaddr(node) do { } while (0)
+#endif
+
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
@@ -700,6 +844,25 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
return 0;
}
+#ifdef CONFIG_SMP
+static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING)
+ gic_cpu_init(&gic_data[0]);
+ return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block __cpuinitdata gic_cpu_notifier = {
+ .notifier_call = gic_secondary_init,
+ .priority = 100,
+};
+#endif
+
const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.xlate = gic_irq_domain_xlate,
@@ -790,6 +953,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
+ register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
@@ -800,13 +964,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_pm_init(gic);
}
-void __cpuinit gic_secondary_init(unsigned int gic_nr)
-{
- BUG_ON(gic_nr >= MAX_GIC_NR);
-
- gic_cpu_init(&gic_data[gic_nr]);
-}
-
#ifdef CONFIG_OF
static int gic_cnt __initdata = 0;
@@ -830,6 +987,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
percpu_offset = 0;
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+ if (!gic_cnt)
+ gic_init_physaddr(node);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 3cf97aaebe4..e38cb00ee78 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -33,7 +33,7 @@
#include <linux/irqchip/arm-vic.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
+#include <asm/irq.h>
#include "irqchip.h"
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a0d931bcb37..b02b679abf3 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template,
return 0;
}
+ ret = devm_gpio_request(parent, template->gpio, template->name);
+ if (ret < 0)
+ return ret;
+
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio,
- (led_dat->active_low ^ state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index ee14662ed5c..98cae529373 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -47,37 +47,37 @@ static struct ot200_led leds[] = {
{
.name = "led_1",
.port = 0x49,
- .mask = BIT(7),
+ .mask = BIT(6),
},
{
.name = "led_2",
.port = 0x49,
- .mask = BIT(6),
+ .mask = BIT(5),
},
{
.name = "led_3",
.port = 0x49,
- .mask = BIT(5),
+ .mask = BIT(4),
},
{
.name = "led_4",
.port = 0x49,
- .mask = BIT(4),
+ .mask = BIT(3),
},
{
.name = "led_5",
.port = 0x49,
- .mask = BIT(3),
+ .mask = BIT(2),
},
{
.name = "led_6",
.port = 0x49,
- .mask = BIT(2),
+ .mask = BIT(1),
},
{
.name = "led_7",
.port = 0x49,
- .mask = BIT(1),
+ .mask = BIT(0),
}
};
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c6083132c4b..0387e05cdb9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
+ unsigned noio_flag;
+ void *ptr;
+
if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
}
*data_mode = DATA_MODE_VMALLOC;
- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+ /*
+ * __vmalloc allocates the data pages and auxiliary structures with
+ * gfp_flags that were specified, but pagetables are always allocated
+ * with GFP_KERNEL, no matter what was specified as gfp_mask.
+ *
+ * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+ * all allocations done by this process (including pagetables) are done
+ * as if GFP_NOIO was specified.
+ */
+
+ if (gfp_mask & __GFP_NORETRY)
+ noio_flag = memalloc_noio_save();
+
+ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+ if (gfp_mask & __GFP_NORETRY)
+ memalloc_noio_restore(noio_flag);
+
+ return ptr;
}
/*
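
The hunk above hinges on a subtlety of __vmalloc(): the gfp_mask covers the data pages and auxiliary structures, but page-table pages are always allocated with GFP_KERNEL, which may recurse into I/O and deadlock a block driver. The per-task PF_MEMALLOC_NOIO flag downgrades every allocation the task makes to GFP_NOIO semantics. A minimal sketch of the bracket (3.9-era kernel API, helper name hypothetical):

    #include <linux/sched.h>
    #include <linux/vmalloc.h>

    static void *vmalloc_no_io(size_t size)
    {
        unsigned int noio_flag;
        void *ptr;

        /* From here on, everything this task allocates -- including the
         * page tables built inside __vmalloc() -- behaves as GFP_NOIO. */
        noio_flag = memalloc_noio_save();
        ptr = __vmalloc(size, GFP_NOIO | __GFP_HIGHMEM, PAGE_KERNEL);
        memalloc_noio_restore(noio_flag);

        return ptr;
    }
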
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 10744091e6c..6feaba24fca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1971,6 +1971,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0e07026a8d..c434e5aab2d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
if (!s->pending_pool) {
ti->error = "Could not allocate mempool for pending exceptions";
+ r = -ENOMEM;
goto bad_pending_pool;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d8837d313f5..7b8b2b93034 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct stripe_c *sc;
- sector_t width;
+ sector_t width, tmp_len;
uint32_t stripes;
uint32_t chunk_size;
int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
width = ti->len;
- if (sector_div(width, chunk_size)) {
+ if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
- "chunk size";
+ "number of stripes";
return -EINVAL;
}
- if (sector_div(width, stripes)) {
+ tmp_len = width;
+ if (sector_div(tmp_len, chunk_size)) {
ti->error = "Target length not divisible by "
- "number of stripes";
+ "chunk size";
return -EINVAL;
}
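
The swap above matters because sector_div() divides its first operand in place and returns the remainder: the old order divided ti->len by chunk_size first and then tested that quotient against the stripe count, so the two checks validated the wrong quantities. The fixed order derives the per-stripe width first, then verifies on a scratch copy that the width is a whole number of chunks, leaving width itself intact. A standalone walk-through with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's sector_div(): divide in place,
     * return the remainder. */
    static uint64_t sector_div(uint64_t *n, uint32_t base)
    {
        uint64_t rem = *n % base;
        *n /= base;
        return rem;
    }

    int main(void)
    {
        uint64_t width, tmp, len = 6144;    /* target length in sectors */
        uint32_t stripes = 3, chunk = 256;

        width = len;
        if (sector_div(&width, stripes))    /* 6144 / 3 = 2048, rem 0: ok */
            return 1;
        tmp = width;                        /* keep width intact */
        if (sector_div(&tmp, chunk))        /* 2048 / 256 = 8, rem 0: ok */
            return 1;
        printf("per-stripe width: %llu sectors\n",
               (unsigned long long)width);
        return 0;
    }
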
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e50dad0c65f..1ff252ab7d4 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+ ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
return false;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aeceedfc530..a4a93b9859f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1564,8 +1564,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sector, count, 1) == 0)
return -EINVAL;
}
- } else if (sb->bblog_offset == 0)
- rdev->badblocks.shift = -1;
+ } else if (sb->bblog_offset != 0)
+ rdev->badblocks.shift = 0;
if (!refdev) {
ret = 1;
@@ -3221,7 +3221,7 @@ int md_rdev_init(struct md_rdev *rdev)
* be used - I wonder if that matters
*/
rdev->badblocks.count = 0;
- rdev->badblocks.shift = 0;
+ rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
seqlock_init(&rdev->badblocks.lock);
if (rdev->badblocks.page == NULL)
@@ -3293,9 +3293,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
goto abort_free;
}
}
- if (super_format == -1)
- /* hot-add for 0.90, or non-persistent: so no badblocks */
- rdev->badblocks.shift = -1;
return rdev;
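
These md hunks all enforce one convention: badblocks.shift < 0 means the bad-block machinery is disabled, shift >= 0 enables it. Initialising every rdev to -1 and enabling only when a v1.x superblock actually advertises a bad-block log (bblog_offset != 0) replaces the old enable-by-default-then-disable dance. A minimal sketch of the convention (names hypothetical):

    struct bb_state {
        int shift;  /* < 0: disabled; >= 0: granularity is 1 << shift sectors */
    };

    static void bb_init(struct bb_state *bb)
    {
        bb->shift = -1;     /* stay disabled until a log is actually found */
    }

    static void bb_maybe_enable(struct bb_state *bb, unsigned long bblog_offset)
    {
        if (bblog_offset)
            bb->shift = 0;  /* track bad blocks at sector granularity */
    }
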
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd86b372692..6af167f8109 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -981,7 +981,12 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ /* Just ignore it */
+ bio_endio(bio, 0);
+ else
+ generic_make_request(bio);
bio = next;
}
kfree(plug);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 77b562d18a9..46c14e5efcb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1133,7 +1133,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ /* Just ignore it */
+ bio_endio(bio, 0);
+ else
+ generic_make_request(bio);
bio = next;
}
kfree(plug);
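
raid1 and raid10 receive the identical guard: when a plugged write turns out to be a discard and the underlying queue cannot do discards, the bio is completed successfully instead of submitted, since a discard is advisory and dropping it is harmless. A minimal sketch of the check (3.9-era bio fields, helper name hypothetical):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void submit_or_drop(struct bio *bio)
    {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);

        if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q))
            bio_endio(bio, 0);      /* device can't discard: just ignore it */
        else
            generic_make_request(bio);
    }
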
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 210dd038bb5..6b40e0cde96 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -36,6 +36,7 @@ struct adp5520_chip {
struct blocking_notifier_head notifier_list;
int irq;
unsigned long id;
+ uint8_t mode;
};
static int __adp5520_read(struct i2c_client *client,
@@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
- adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode);
+ /* All other bits are W1C */
+ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY;
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
return 0;
}
@@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
- adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode);
return 0;
}
#endif
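
The suspend rework above exists because every MODE_STATUS bit other than BL_EN, DIM_EN and nSTNBY is write-one-to-clear status, so the old read-modify-write through adp5520_clr_bits()/adp5520_set_bits() would silently ack pending events. Suspend now snapshots just the three control bits and zeroes the register; resume writes the snapshot back. A sketch of the pattern using the driver's own accessors (function names hypothetical):

    /* Only these survive suspend; everything else in MODE_STATUS is
     * write-one-to-clear status that must not be rewritten. */
    #define ADP5520_CTRL_BITS  (ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY)

    static uint8_t saved_mode;

    static int sketch_suspend(struct device *dev)
    {
        adp5520_read(dev, ADP5520_MODE_STATUS, &saved_mode);
        saved_mode &= ADP5520_CTRL_BITS;                    /* control bits only */
        return adp5520_write(dev, ADP5520_MODE_STATUS, 0);  /* enter standby */
    }

    static int sketch_resume(struct device *dev)
    {
        return adp5520_write(dev, ADP5520_MODE_STATUS, saved_mode);
    }
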
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index 3c1723aa622..84ce6b9daa3 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -184,13 +184,14 @@ static int vexpress_config_schedule(struct vexpress_config_trans *trans)
spin_lock_irqsave(&bridge->transactions_lock, flags);
- vexpress_config_dump_trans("Executing", trans);
-
- if (list_empty(&bridge->transactions))
+ if (list_empty(&bridge->transactions)) {
+ vexpress_config_dump_trans("Executing", trans);
status = bridge->info->func_exec(trans->func->func,
trans->offset, trans->write, trans->data);
- else
+ } else {
+ vexpress_config_dump_trans("Queuing", trans);
status = VEXPRESS_CONFIG_STATUS_WAIT;
+ }
switch (status) {
case VEXPRESS_CONFIG_STATUS_DONE:
@@ -212,25 +213,31 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
{
struct vexpress_config_trans *trans;
unsigned long flags;
+ const char *message = "Completed";
spin_lock_irqsave(&bridge->transactions_lock, flags);
trans = list_first_entry(&bridge->transactions,
struct vexpress_config_trans, list);
- vexpress_config_dump_trans("Completed", trans);
-
trans->status = status;
- list_del(&trans->list);
- if (!list_empty(&bridge->transactions)) {
- vexpress_config_dump_trans("Pending", trans);
+ do {
+ vexpress_config_dump_trans(message, trans);
+ list_del(&trans->list);
+ complete(&trans->completion);
- bridge->info->func_exec(trans->func->func, trans->offset,
- trans->write, trans->data);
- }
- spin_unlock_irqrestore(&bridge->transactions_lock, flags);
+ if (list_empty(&bridge->transactions))
+ break;
+
+ trans = list_first_entry(&bridge->transactions,
+ struct vexpress_config_trans, list);
+ vexpress_config_dump_trans("Executing pending", trans);
+ trans->status = bridge->info->func_exec(trans->func->func,
+ trans->offset, trans->write, trans->data);
+ message = "Finished pending";
+ } while (trans->status == VEXPRESS_CONFIG_STATUS_DONE);
- complete(&trans->completion);
+ spin_unlock_irqrestore(&bridge->transactions_lock, flags);
}
EXPORT_SYMBOL(vexpress_config_complete);
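
The rewrite above turns vexpress_config_complete() into a queue drain: after completing the head transaction it immediately starts the next pending one and, whenever the bridge finishes it synchronously, loops again under the same lock; a WAIT status breaks out and the next interrupt re-enters the function. A compact sketch of the drain pattern (types and names hypothetical):

    #include <linux/completion.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    enum { STATUS_DONE, STATUS_WAIT };

    struct trans {
        struct list_head node;
        struct completion done;
        int status;
    };

    static void queue_complete(spinlock_t *lock, struct list_head *queue,
                               int status, int (*exec)(struct trans *))
    {
        struct trans *t;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        t = list_first_entry(queue, struct trans, node);
        t->status = status;
        do {
            list_del(&t->node);
            complete(&t->done);
            if (list_empty(queue))
                break;
            t = list_first_entry(queue, struct trans, node);
            t->status = exec(t);                /* may finish synchronously... */
        } while (t->status == STATUS_DONE);     /* ...else an IRQ re-enters */
        spin_unlock_irqrestore(lock, flags);
    }
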
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index bf75e967a1f..96a020b1dcd 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -490,12 +490,12 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
return err;
}
+ vexpress_sysreg_dev = &pdev->dev;
+
platform_device_register_data(vexpress_sysreg_dev, "leds-gpio",
PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata,
sizeof(vexpress_sysreg_leds_pdata));
- vexpress_sysreg_dev = &pdev->dev;
-
device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
return 0;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fa4813aab30..107efeb7ade 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -523,4 +523,5 @@ source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/vexpress/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9803580027e..5f6b9e7cfde 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
+obj-$(CONFIG_ARCH_VEXPRESS) += vexpress/
diff --git a/drivers/misc/vexpress/Kconfig b/drivers/misc/vexpress/Kconfig
new file mode 100644
index 00000000000..3e2676ae6ee
--- /dev/null
+++ b/drivers/misc/vexpress/Kconfig
@@ -0,0 +1,3 @@
+config ARM_SPC
+ bool "ARM SPC driver support"
+ depends on ARM
diff --git a/drivers/misc/vexpress/Makefile b/drivers/misc/vexpress/Makefile
new file mode 100644
index 00000000000..95b58166d0a
--- /dev/null
+++ b/drivers/misc/vexpress/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_SPC) += arm-spc.o
diff --git a/drivers/misc/vexpress/arm-spc.c b/drivers/misc/vexpress/arm-spc.c
new file mode 100644
index 00000000000..913dd087282
--- /dev/null
+++ b/drivers/misc/vexpress/arm-spc.c
@@ -0,0 +1,718 @@
+/*
+ * Serial Power Controller (SPC) support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author(s): Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ * Achin Gupta <achin.gupta@arm.com>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/vexpress.h>
+
+#include <asm/cacheflush.h>
+#include <asm/memory.h>
+#include <asm/outercache.h>
+
+#define SCC_CFGREG6 0x018
+#define SCC_CFGREG19 0x120
+#define SCC_CFGREG20 0x124
+#define A15_CONF 0x400
+#define SNOOP_CTL_A15 0x404
+#define A7_CONF 0x500
+#define SNOOP_CTL_A7 0x504
+#define SYS_INFO 0x700
+#define PERF_LVL_A15 0xB00
+#define PERF_REQ_A15 0xB04
+#define PERF_LVL_A7 0xB08
+#define PERF_REQ_A7 0xB0c
+#define COMMS 0xB10
+#define COMMS_REQ 0xB14
+#define PWC_STATUS 0xB18
+#define PWC_FLAG 0xB1c
+#define WAKE_INT_MASK 0xB24
+#define WAKE_INT_RAW 0xB28
+#define WAKE_INT_STAT 0xB2c
+#define A15_PWRDN_EN 0xB30
+#define A7_PWRDN_EN 0xB34
+#define A15_A7_ISOLATE 0xB38
+#define STANDBYWFI_STAT 0xB3c
+#define A15_CACTIVE 0xB40
+#define A15_PWRDNREQ 0xB44
+#define A15_PWRDNACK 0xB48
+#define A7_CACTIVE 0xB4c
+#define A7_PWRDNREQ 0xB50
+#define A7_PWRDNACK 0xB54
+#define A15_RESET_HOLD 0xB58
+#define A7_RESET_HOLD 0xB5c
+#define A15_RESET_STAT 0xB60
+#define A7_RESET_STAT 0xB64
+#define A15_BX_ADDR0 0xB68
+#define SYS_CFG_WDATA 0xB70
+#define SYS_CFG_RDATA 0xB74
+#define A7_BX_ADDR0 0xB78
+#define SPC_CONTROL 0xC00
+#define SPC_LATENCY 0xC04
+#define A15_PERFVAL_BASE 0xC10
+#define A7_PERFVAL_BASE 0xC30
+
+#define A15_STANDBYWFIL2_MSK (1 << 2)
+#define A7_STANDBYWFIL2_MSK (1 << 6)
+#define GBL_WAKEUP_INT_MSK (0x3 << 10)
+
+#define SYS_CFG_START (1 << 31)
+#define SYS_CFG_SCC (6 << 20)
+#define SYS_CFG_STAT (14 << 20)
+
+#define CLKF_SHIFT 16
+#define CLKF_MASK 0x1FFF
+#define CLKR_SHIFT 0
+#define CLKR_MASK 0x3F
+#define CLKOD_SHIFT 8
+#define CLKOD_MASK 0xF
+
+#define A15_PART_NO 0xF
+#define A7_PART_NO 0x7
+
+#define DRIVER_NAME "SPC"
+
+/*
+ * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
+ * operation, the operation could start just before the jiffy counter
+ * is incremented, so use a 20 ms timeout (2 jiffies at 100 Hz).
+ */
+#define TIME_OUT_US 20000
+
+#define MAX_OPPS 8
+#define MAX_CLUSTERS 2
+
+struct vexpress_spc_drvdata {
+ void __iomem *baseaddr;
+ uint32_t a15_clusid;
+ int irq;
+ uint32_t cur_rsp_mask;
+ uint32_t cur_rsp_stat;
+#define A15_OPP 0
+#define A7_OPP 1
+#define COMMS_OPP 2
+#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
+#define STAT_ERR(type) ((1 << 1) << (type << 2))
+#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
+ struct semaphore lock;
+ struct completion done;
+ uint32_t freqs[MAX_CLUSTERS][MAX_OPPS];
+ int freqs_cnt[MAX_CLUSTERS];
+};
+
+static struct vexpress_spc_drvdata *info;
+
+/* SCC virtual address */
+u32 vscc;
+
+u32 vexpress_spc_get_clusterid(int cpu_part_no)
+{
+ switch (cpu_part_no & 0xf) {
+ case A15_PART_NO:
+ return readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ case A7_PART_NO:
+ return readl_relaxed(info->baseaddr + A7_CONF) & 0xf;
+ default:
+ BUG();
+ }
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_get_clusterid);
+
+void vexpress_spc_write_bxaddr_reg(int cluster, int cpu, u32 val)
+{
+ void __iomem *baseaddr;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ if (cluster != info->a15_clusid)
+ baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
+ else
+ baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
+
+ writel_relaxed(val, baseaddr);
+ dsb();
+ while (val != readl_relaxed(baseaddr));
+
+ return;
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_write_bxaddr_reg);
+
+int vexpress_spc_get_nb_cpus(int cluster)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ val = readl_relaxed(info->baseaddr + SYS_INFO);
+ val = (cluster != info->a15_clusid) ? (val >> 20) : (val >> 16);
+
+ return (val & 0xf);
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_get_nb_cpus);
+
+int vexpress_spc_standbywfil2_status(int cluster)
+{
+ u32 standbywfi_stat;
+
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ standbywfi_stat = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
+
+ if (cluster != info->a15_clusid)
+ return standbywfi_stat & A7_STANDBYWFIL2_MSK;
+ else
+ return standbywfi_stat & A15_STANDBYWFIL2_MSK;
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_standbywfil2_status);
+
+int vexpress_spc_standbywfi_status(int cluster, int cpu)
+{
+ u32 standbywfi_stat;
+
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ standbywfi_stat = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
+
+ if (cluster != info->a15_clusid)
+ return standbywfi_stat & ((1 << cpu) << 3);
+ else
+ return standbywfi_stat & (1 << cpu);
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_standbywfi_status);
+
+u32 vexpress_spc_read_rststat_reg(int cluster)
+{
+
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ if (cluster != info->a15_clusid)
+ return readl_relaxed(info->baseaddr + A7_RESET_STAT);
+ else
+ return readl_relaxed(info->baseaddr + A15_RESET_STAT);
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_read_rststat_reg);
+
+u32 vexpress_spc_read_rsthold_reg(int cluster)
+{
+
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ if (cluster != info->a15_clusid)
+ return readl_relaxed(info->baseaddr + A7_RESET_HOLD);
+ else
+ return readl_relaxed(info->baseaddr + A15_RESET_HOLD);
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_read_rsthold_reg);
+
+void vexpress_spc_write_rsthold_reg(int cluster, u32 value)
+{
+
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ if (cluster != info->a15_clusid)
+ writel_relaxed(value, info->baseaddr + A7_RESET_HOLD);
+ else
+ writel_relaxed(value, info->baseaddr + A15_RESET_HOLD);
+}
+
+EXPORT_SYMBOL_GPL(vexpress_spc_write_rsthold_reg);
+
+int vexpress_spc_get_performance(int cluster, u32 *freq)
+{
+ u32 perf_cfg_reg = 0;
+ int perf;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ perf_cfg_reg = cluster != info->a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+
+ if (down_timeout(&info->lock, usecs_to_jiffies(TIME_OUT_US)))
+ return -ETIME;
+
+ perf = readl(info->baseaddr + perf_cfg_reg);
+
+ *freq = info->freqs[cluster][perf];
+
+ up(&info->lock);
+
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_performance);
+
+static int vexpress_spc_find_perf_index(int cluster, u32 freq)
+{
+ int idx;
+	/* A hash lookup would be ideal here (hashtable.h exists since v3.8) */
+ for (idx = 0; idx < info->freqs_cnt[cluster]; idx++)
+ if (info->freqs[cluster][idx] == freq)
+ break;
+ return idx;
+}
+
+static int vexpress_spc_waitforcompletion(int req_type)
+{
+ int ret;
+
+ if (!wait_for_completion_interruptible_timeout(&info->done,
+ usecs_to_jiffies(TIME_OUT_US)))
+ ret = -ETIMEDOUT;
+ else
+ ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
+ return ret;
+}
+
+int vexpress_spc_set_performance(int cluster, u32 freq)
+{
+ u32 perf_cfg_reg, perf_stat_reg;
+ int ret, perf, req_type;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ if (cluster != info->a15_clusid) {
+ req_type = A7_OPP;
+ perf_cfg_reg = PERF_LVL_A7;
+ perf_stat_reg = PERF_REQ_A7;
+ } else {
+ req_type = A15_OPP;
+ perf_cfg_reg = PERF_LVL_A15;
+ perf_stat_reg = PERF_REQ_A15;
+ }
+
+ perf = vexpress_spc_find_perf_index(cluster, freq);
+
+ if (perf >= MAX_OPPS)
+ return -EINVAL;
+
+ if (down_timeout(&info->lock, usecs_to_jiffies(TIME_OUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+
+ info->cur_rsp_mask = RESPONSE_MASK(req_type);
+
+ writel(perf, info->baseaddr + perf_cfg_reg);
+
+ ret = vexpress_spc_waitforcompletion(req_type);
+
+ info->cur_rsp_mask = 0;
+
+ up(&info->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_performance);
+
+int vexpress_spc_set_global_wakeup_intr(u32 set)
+{
+ u32 wake_int_mask_reg = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ wake_int_mask_reg = readl(info->baseaddr + WAKE_INT_MASK);
+ if (set)
+ wake_int_mask_reg |= GBL_WAKEUP_INT_MSK;
+ else
+ wake_int_mask_reg &= ~GBL_WAKEUP_INT_MSK;
+
+ vexpress_spc_set_wake_intr(wake_int_mask_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_global_wakeup_intr);
+
+int vexpress_spc_set_cpu_wakeup_irq(u32 cpu, u32 cluster, u32 set)
+{
+ u32 mask = 0;
+ u32 wake_int_mask_reg = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ mask = 1 << cpu;
+ if (info->a15_clusid != cluster)
+ mask <<= 4;
+
+ wake_int_mask_reg = readl(info->baseaddr + WAKE_INT_MASK);
+ if (set)
+ wake_int_mask_reg |= mask;
+ else
+ wake_int_mask_reg &= ~mask;
+
+ vexpress_spc_set_wake_intr(wake_int_mask_reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_cpu_wakeup_irq);
+
+void vexpress_spc_set_wake_intr(u32 mask)
+{
+ if (!IS_ERR_OR_NULL(info)) {
+ writel(mask & VEXPRESS_SPC_WAKE_INTR_MASK,
+ info->baseaddr + WAKE_INT_MASK);
+ dsb();
+ while ((mask & VEXPRESS_SPC_WAKE_INTR_MASK) !=
+ readl(info->baseaddr + WAKE_INT_MASK));
+ }
+
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_wake_intr);
+
+u32 vexpress_spc_get_wake_intr(int raw)
+{
+ u32 wake_intr_reg = raw ? WAKE_INT_RAW : WAKE_INT_STAT;
+
+ if (!IS_ERR_OR_NULL(info))
+ return readl(info->baseaddr + wake_intr_reg);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_wake_intr);
+
+void vexpress_spc_powerdown_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+
+ if (!IS_ERR_OR_NULL(info)) {
+ pwdrn_reg = cluster != info->a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN;
+ writel(!!enable, info->baseaddr + pwdrn_reg);
+ dsb();
+ while (readl(info->baseaddr + pwdrn_reg) != !!enable);
+ }
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_powerdown_enable);
+
+void vexpress_spc_adb400_pd_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+	u32 val = enable ? 0xF : 0x0; /* all ADB bridges? */
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ pwdrn_reg = cluster != info->a15_clusid ? A7_PWRDNREQ : A15_PWRDNREQ;
+
+ writel(val, info->baseaddr + pwdrn_reg);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_adb400_pd_enable);
+
+void vexpress_scc_ctl_snoops(int cluster, int enable)
+{
+ u32 val;
+ u32 snoop_reg = 0;
+ u32 or = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ snoop_reg = cluster != info->a15_clusid ? SNOOP_CTL_A7 : SNOOP_CTL_A15;
+ or = cluster != info->a15_clusid ? 0x2000 : 0x180;
+
+ val = readl_relaxed(info->baseaddr + snoop_reg);
+ if (enable) {
+ or = ~or;
+ val &= or;
+ } else {
+ val |= or;
+ dsb();
+ isb();
+ }
+
+ writel_relaxed(val, info->baseaddr + snoop_reg);
+}
+EXPORT_SYMBOL_GPL(vexpress_scc_ctl_snoops);
+
+u32 vexpress_scc_read_rststat(int cluster)
+{
+ if (IS_ERR_OR_NULL(info))
+ BUG();
+
+ if (cluster != info->a15_clusid)
+ return (readl_relaxed(info->baseaddr + SCC_CFGREG6) >> 16) & 0x7;
+ else
+ return (readl_relaxed(info->baseaddr + SCC_CFGREG6) >> 2) & 0x3;
+}
+EXPORT_SYMBOL_GPL(vexpress_scc_read_rststat);
+
+void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{
+ u32 rsthold_reg, prst_shift;
+ u32 val;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ if (cluster != info->a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ prst_shift = 3;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ prst_shift = 2;
+ }
+ val = readl_relaxed(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= (1 << cpu);
+ else
+ val &= ~(1 << cpu);
+ writel_relaxed(val, info->baseaddr + rsthold_reg);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpureset);
+
+void vexpress_spc_wfi_cluster_reset(int cluster, int enable)
+{
+ u32 rsthold_reg, shift;
+ u32 val;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ if (cluster != info->a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ shift = 6;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ shift = 4;
+ }
+ val = readl(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= 1 << shift;
+ else
+ val &= ~(1 << shift);
+ writel(val, info->baseaddr + rsthold_reg);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cluster_reset);
+
+int vexpress_spc_wfi_cpustat(int cluster)
+{
+ u32 rststat_reg;
+ u32 val;
+
+ if (IS_ERR_OR_NULL(info))
+ return 0;
+
+ rststat_reg = STANDBYWFI_STAT;
+
+ val = readl_relaxed(info->baseaddr + rststat_reg);
+ return cluster != info->a15_clusid ? ((val & 0x38) >> 3) : (val & 0x3);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpustat);
+
+irqreturn_t vexpress_spc_irq_handler(int irq, void *data)
+{
+ struct vexpress_spc_drvdata *drv_data = data;
+ uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
+
+ if (info->cur_rsp_mask & status) {
+ info->cur_rsp_stat = status;
+ complete(&drv_data->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int read_sys_cfg(int func, int offset, uint32_t *data)
+{
+ int ret;
+
+ if (down_timeout(&info->lock, usecs_to_jiffies(TIME_OUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+
+ info->cur_rsp_mask = RESPONSE_MASK(COMMS_OPP);
+
+ /* Set the control value */
+ writel(SYS_CFG_START | func | offset >> 2, info->baseaddr + COMMS);
+
+ ret = vexpress_spc_waitforcompletion(COMMS_OPP);
+
+ if (!ret)
+ *data = readl(info->baseaddr + SYS_CFG_RDATA);
+
+ info->cur_rsp_mask = 0;
+
+ up(&info->lock);
+
+ return ret;
+}
+
+/*
+ * Based on the firmware documentation, the multiplier factor is always
+ * fixed to 20. All four oscillators (A15 PLL0/1 and A7 PLL0/1) must be
+ * programmed with the same values in both the control and value registers.
+ * This function uses the A15 PLL0 registers to compute the multiplier:
+ * F_out = F_in * (CLKF + 1) / ((CLKOD + 1) * (CLKR + 1))
+ */
+static inline int __get_mult_factor(void)
+{
+ int i_div, o_div, f_div;
+ uint32_t tmp;
+
+ tmp = readl(info->baseaddr + SCC_CFGREG19);
+ f_div = (tmp >> CLKF_SHIFT) & CLKF_MASK;
+
+ tmp = readl(info->baseaddr + SCC_CFGREG20);
+ o_div = (tmp >> CLKOD_SHIFT) & CLKOD_MASK;
+ i_div = (tmp >> CLKR_SHIFT) & CLKR_MASK;
+
+ return (f_div + 1) / ((o_div + 1) * (i_div + 1));
+}
+
+static int vexpress_spc_populate_opps(uint32_t cluster)
+{
+ uint32_t data = 0, off, ret, j;
+ int mult_fact = __get_mult_factor();
+
+ off = cluster != info->a15_clusid ? A7_PERFVAL_BASE : A15_PERFVAL_BASE;
+ for (j = 0; j < MAX_OPPS; j++, off += 4) {
+ ret = read_sys_cfg(SYS_CFG_SCC, off, &data);
+ if (!ret)
+ info->freqs[cluster][j] = (data & 0xFFFFF) * mult_fact;
+ else
+ break;
+ }
+
+ info->freqs_cnt[cluster] = j;
+ return ret;
+}
+
+unsigned int *vexpress_spc_get_freq_table(uint32_t cluster, int *count)
+{
+
+ *count = info->freqs_cnt[cluster];
+ return info->freqs[cluster];
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_freq_table);
+
+static int vexpress_spc_init(void)
+{
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,spc");
+ if (!node)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_err("%s: unable to allocate mem\n", __func__);
+ return -ENOMEM;
+ }
+
+ info->baseaddr = of_iomap(node, 0);
+ if (WARN_ON(!info->baseaddr)) {
+ kfree(info);
+ return -EIO;
+ }
+
+ vscc = (u32) info->baseaddr;
+ sema_init(&info->lock, 1);
+
+ info->irq = irq_of_parse_and_map(node, 0);
+
+ if (info->irq) {
+ int ret;
+
+ init_completion(&info->done);
+
+ readl_relaxed(info->baseaddr + PWC_STATUS);
+
+ ret = request_irq(info->irq, vexpress_spc_irq_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "arm-spc", info);
+ if (ret) {
+ pr_err("IRQ %d request failed \n", info->irq);
+ iounmap(info->baseaddr);
+ kfree(info);
+ return -ENODEV;
+ }
+ }
+
+ info->a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (vexpress_spc_populate_opps(0) || vexpress_spc_populate_opps(1)) {
+ if (info->irq)
+ free_irq(info->irq, info);
+ iounmap(info->baseaddr);
+ kfree(info);
+ pr_err("failed to build OPP table\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory:
+ */
+ __cpuc_flush_dcache_area(info, sizeof *info);
+ __cpuc_flush_dcache_area(&info, sizeof info);
+ outer_clean_range(virt_to_phys(info), virt_to_phys(info + 1));
+ outer_clean_range(virt_to_phys(&info), virt_to_phys(&info + 1));
+
+ pr_info("vexpress_spc loaded at %p\n", info->baseaddr);
+ return 0;
+}
+
+static int vexpress_spc_load_result = -EAGAIN;
+static DEFINE_MUTEX(vexpress_spc_loading);
+
+bool vexpress_spc_check_loaded(void)
+{
+ if (vexpress_spc_load_result != -EAGAIN)
+ return (vexpress_spc_load_result == 0);
+
+ mutex_lock(&vexpress_spc_loading);
+ if (vexpress_spc_load_result == -EAGAIN)
+ vexpress_spc_load_result = vexpress_spc_init();
+ mutex_unlock(&vexpress_spc_loading);
+ return (vexpress_spc_load_result == 0);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_check_loaded);
+
+static int __init vexpress_spc_early_init(void)
+{
+ vexpress_spc_check_loaded();
+ return vexpress_spc_load_result;
+}
+
+early_initcall(vexpress_spc_early_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial Power Controller (SPC) support");
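
For reference, the OPP table built in vexpress_spc_populate_opps() scales the raw 20-bit performance value read over the SYS_CFG interface by the PLL multiplier from __get_mult_factor(), i.e. F_out = F_in * (CLKF + 1) / ((CLKOD + 1) * (CLKR + 1)). A standalone sketch of the arithmetic with hypothetical field values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical SCC_CFGREG19/20 fields; per the comment above, the
         * firmware fixes the resulting multiplier to 20. */
        uint32_t clkf = 19, clkod = 0, clkr = 0;
        uint32_t perfval = 50000;   /* raw 20-bit OPP value from firmware */

        uint32_t mult = (clkf + 1) / ((clkod + 1) * (clkr + 1));
        printf("multiplier %u, frequency %u\n",
               mult, (perfval & 0xFFFFF) * mult);
        return 0;
    }
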
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fffb8e5f836..806d37cb1d9 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1156,6 +1156,10 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_ECC_ERR;
return MMC_BLK_DATA_ERR;
} else {
+ if (brq->data.blocks > 1) {
+			/* Hack: report a data error so the caller retries one sector at a time */
+ return MMC_BLK_DATA_ERR;
+ }
return MMC_BLK_CMD_ERR;
}
}
@@ -1808,7 +1812,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
- pr_warning("%s: retrying using single block read\n",
+ pr_warning("%s: retrying using single block transfer\n",
req->rq_disk->disk_name);
disable_multi = 1;
break;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8f3d6e0684..8ac51013a4f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -368,13 +368,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.raw_trim_mult =
ext_csd[EXT_CSD_TRIM_MULT];
+ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
* card has the Enhanced area enabled. If so, export enhanced
* area offset and size to user by adding sysfs interface.
*/
- card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
hc_erase_grp_sz =
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d88219e1d86..9c581c2ed34 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -300,16 +300,6 @@ config MMC_ATMELMCI
If unsure, say N.
-config MMC_ATMELMCI_DMA
- bool "Atmel MCI DMA support"
- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
- help
- Say Y here to have the Atmel MCI driver use a DMA engine to
- do data transfers and thus increase the throughput and
- reduce the CPU utilization.
-
- If unsure, say N.
-
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 722af1de796..e75774f7260 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -178,6 +178,7 @@ struct atmel_mci {
void __iomem *regs;
struct scatterlist *sg;
+ unsigned int sg_len;
unsigned int pio_offset;
unsigned int *buffer;
unsigned int buf_size;
@@ -892,6 +893,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
data->error = -EINPROGRESS;
host->sg = data->sg;
+ host->sg_len = data->sg_len;
host->data = data;
host->data_chan = NULL;
@@ -1826,7 +1828,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 0;
@@ -1839,7 +1842,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 4 - remaining;
@@ -1890,7 +1894,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
nbytes += 4;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
- if (!sg)
+ host->sg_len--;
+ if (!sg || !host->sg_len)
goto done;
offset = 0;
@@ -1904,7 +1909,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
nbytes += remaining;
host->sg = sg = sg_next(sg);
- if (!sg) {
+ host->sg_len--;
+ if (!sg || !host->sg_len) {
atmci_writel(host, ATMCI_TDR, value);
goto done;
}
@@ -2487,10 +2493,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_readl(host, ATMCI_SR);
clk_disable(host->mck);
-#ifdef CONFIG_MMC_ATMELMCI_DMA
if (host->dma.chan)
dma_release_channel(host->dma.chan);
-#endif
free_irq(platform_get_irq(pdev, 0), host);
iounmap(host->regs);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 45cb9f3c132..3b954658824 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -136,6 +136,9 @@
#define KVASER_CTRL_MODE_SELFRECEPTION 3
#define KVASER_CTRL_MODE_OFF 4
+/* log message */
+#define KVASER_EXTENDED_FRAME BIT(31)
+
struct kvaser_msg_simple {
u8 tid;
u8 channel;
@@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
+ if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
+ (msg->id == CMD_LOG_MESSAGE)) {
+ kvaser_usb_rx_error(dev, msg);
+ return;
+ } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
kvaser_usb_rx_can_err(priv, msg);
return;
} else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
@@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
- (msg->u.rx_can.msg[1] & 0x3f);
- cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+ if (msg->id == CMD_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(msg->u.log_message.id);
+ if (cf->can_id & KVASER_EXTENDED_FRAME)
+ cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ cf->can_id &= CAN_SFF_MASK;
- if (msg->id == CMD_RX_EXT_MESSAGE) {
- cf->can_id <<= 18;
- cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
- ((msg->u.rx_can.msg[3] & 0xff) << 6) |
- (msg->u.rx_can.msg[4] & 0x3f);
- cf->can_id |= CAN_EFF_FLAG;
- }
+ cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
- if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+ if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &msg->u.log_message.data,
+ cf->can_dlc);
+ } else {
+ cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+ (msg->u.rx_can.msg[1] & 0x3f);
+
+ if (msg->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+ ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+ (msg->u.rx_can.msg[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+ if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &msg->u.rx_can.msg[6],
+ cf->can_dlc);
+ }
netif_rx(skb);
@@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
case CMD_RX_STD_MESSAGE:
case CMD_RX_EXT_MESSAGE:
+ case CMD_LOG_MESSAGE:
kvaser_usb_rx_can_msg(dev, msg);
break;
@@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
kvaser_usb_rx_error(dev, msg);
break;
- case CMD_LOG_MESSAGE:
- if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
- kvaser_usb_rx_error(dev, msg);
- break;
-
case CMD_TX_ACKNOWLEDGE:
kvaser_usb_tx_acknowledge(dev, msg);
break;
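
A CMD_LOG_MESSAGE entry carries a pre-assembled little-endian 32-bit identifier in which bit 31 (KVASER_EXTENDED_FRAME) marks a 29-bit extended frame, unlike CMD_RX_STD/EXT_MESSAGE where the ID arrives scattered across message bytes. Conveniently, SocketCAN's CAN_EFF_FLAG is the same bit. A standalone sketch of the ID decode:

    #include <stdint.h>

    #define CAN_SFF_MASK            0x000007FFu  /* 11-bit standard ID */
    #define CAN_EFF_MASK            0x1FFFFFFFu  /* 29-bit extended ID */
    #define CAN_EFF_FLAG            0x80000000u  /* SocketCAN extended marker */
    #define KVASER_EXTENDED_FRAME   (1u << 31)   /* device-side marker */

    static uint32_t decode_log_id(uint32_t raw)
    {
        if (raw & KVASER_EXTENDED_FRAME)
            return (raw & CAN_EFF_MASK) | CAN_EFF_FLAG;
        return raw & CAN_SFF_MASK;      /* standard frame: 11 bits only */
    }
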
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index f36ff99fd39..adb4bf5eb4b 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -306,6 +306,7 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, pdev);
netdev_boot_setup_check(dev);
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
@@ -595,6 +596,7 @@ static int __init el3_eisa_probe (struct device *device)
return -ENOMEM;
}
+ SET_NETDEV_DEV(dev, device);
netdev_boot_setup_check(dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 1928e200158..072c6f14e8f 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -632,7 +632,6 @@ struct vortex_private {
pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
open:1,
medialock:1,
- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
large_frames:1, /* accept large frames */
handling_irq:1; /* private in_irq indicator */
/* {get|set}_wol operations are already serialized by rtnl.
@@ -951,7 +950,7 @@ static int vortex_eisa_remove(struct device *device)
unregister_netdev(dev);
iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
+ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
free_netdev(dev);
return 0;
@@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
if (rc < 0)
goto out;
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc < 0) {
+ pci_disable_device(pdev);
+ goto out;
+ }
+
unit = vortex_cards_found;
if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
@@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr) {
+ pci_release_regions(pdev);
pci_disable_device(pdev);
rc = -ENOMEM;
goto out;
@@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
ent->driver_data, unit);
if (rc < 0) {
pci_iounmap(pdev, ioaddr);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
goto out;
}
@@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
/* PCI-only startup logic */
if (pdev) {
- /* EISA resources already marked, so only PCI needs to do this here */
- /* Ignore return value, because Cardbus drivers already allocate for us */
- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
- vp->must_free_region = 1;
-
/* enable bus-mastering if necessary */
if (vci->flags & PCI_USES_MASTER)
pci_set_master(pdev);
@@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
&vp->rx_ring_dma);
retval = -ENOMEM;
if (!vp->rx_ring)
- goto free_region;
+ goto free_device;
vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1484,9 +1486,7 @@ free_ring:
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring,
vp->rx_ring_dma);
-free_region:
- if (vp->must_free_region)
- release_region(dev->base_addr, vci->io_size);
+free_device:
free_netdev(dev);
pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
@@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring,
vp->rx_ring_dma);
- if (vp->must_free_region)
- release_region(dev->base_addr, vp->io_size);
+
+ pci_release_regions(pdev);
+
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba..6f42e573dad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2921,6 +2921,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
return 0;
}
+static bool tg3_phy_power_bug(struct tg3 *tp)
+{
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5700:
+ case ASIC_REV_5704:
+ return true;
+ case ASIC_REV_5780:
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ return true;
+ return false;
+ case ASIC_REV_5717:
+ if (!tp->pci_fn)
+ return true;
+ return false;
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tp->pci_fn)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
@@ -2977,12 +3002,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
- tg3_asic_rev(tp) == ASIC_REV_5704 ||
- (tg3_asic_rev(tp) == ASIC_REV_5780 &&
- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (tg3_asic_rev(tp) == ASIC_REV_5717 &&
- !tp->pci_fn))
+ if (tg3_phy_power_bug(tp))
return;
if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
@@ -7058,6 +7078,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
return (base > 0xffffdcc0) && (base + len + 8 < base);
}
+/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
+ * of any 4GB boundaries: 4G, 8G, etc
+ */
+static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ u32 len, u32 mss)
+{
+ if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
+ u32 base = (u32) mapping & 0xffffffff;
+
+ return ((base + len + (mss & 0x3fff)) < base);
+ }
+ return 0;
+}
+
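
The test above leans on deliberate 32-bit wrap-around: base + len + mss compares below base exactly when the buffer would spill into the MSS-sized window just under a 4 GB boundary. A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* True when [base, base + len + mss) wraps a 32-bit (4 GB) boundary. */
    static int crosses_4g_window(uint32_t base, uint32_t len, uint32_t mss)
    {
        return (uint32_t)(base + len + (mss & 0x3fff)) < base;
    }

    int main(void)
    {
        /* 1 KB below 4 GB with a 1460-byte MSS: wraps, so prints 1. */
        printf("%d\n", crosses_4g_window(0xFFFFFC00u, 512, 1460));
        /* Comfortably inside a 4 GB span: prints 0. */
        printf("%d\n", crosses_4g_window(0x10000000u, 512, 1460));
        return 0;
    }
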
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
int len)
@@ -7094,6 +7128,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
if (tg3_4g_overflow_test(map, len))
hwbug = true;
+ if (tg3_4g_tso_overflow_test(tp, map, len, mss))
+ hwbug = true;
+
if (tg3_40bit_overflow_test(tp, map, len))
hwbug = true;
@@ -9056,6 +9093,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
}
}
+static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
+{
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
+ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
+ else
+ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
+}
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -9735,16 +9780,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (tg3_asic_rev(tp) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
}
if (i < TG3_NUM_RDMA_CHANNELS) {
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ val |= tg3_lso_rd_dma_workaround_bit(tp);
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
- tg3_flag_set(tp, 5719_RDMA_BUG);
+ tg3_flag_set(tp, 5719_5720_RDMA_BUG);
}
}
@@ -10101,15 +10147,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
- if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
+ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
(sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
u32 val;
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ val &= ~tg3_lso_rd_dma_workaround_bit(tp);
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
- tg3_flag_clear(tp, 5719_RDMA_BUG);
+ tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
}
TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d..25309bfbc19 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1422,7 +1422,8 @@
#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
-#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000
/* 0x4914 --> 0x4be0 unused */
#define TG3_NUM_RDMA_CHANNELS 4
@@ -3043,7 +3044,7 @@ enum TG3_FLAGS {
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_TX_TSTAMP_EN,
TG3_FLAG_4K_FIFO_LIMIT,
- TG3_FLAG_5719_RDMA_BUG,
+ TG3_FLAG_5719_5720_RDMA_BUG,
TG3_FLAG_RESET_TASK_PENDING,
TG3_FLAG_PTP_CAPABLE,
TG3_FLAG_5705_PLUS,
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 2e5daee0438..a3f8a2551f2 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -127,7 +127,6 @@ struct gianfar_ptp_registers {
#define DRIVER "gianfar_ptp"
#define DEFAULT_CKSEL 1
-#define N_ALARM 1 /* first alarm is used internally to reset fipers */
#define N_EXT_TS 2
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
@@ -410,7 +409,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.owner = THIS_MODULE,
.name = "gianfar clock",
.max_adj = 512000,
- .n_alarm = N_ALARM,
+ .n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
.pps = 1,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a990..f46dbef0727 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1324,7 +1324,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
- int rc, i;
+ int rc, i, mac_len;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
@@ -1334,11 +1334,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev->unit_address);
mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
- NULL);
+ &mac_len);
if (!mac_addr_p) {
dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
return -EINVAL;
}
+ /* Workaround for old/broken pHyp */
+ if (mac_len == 8)
+ mac_addr_p += 2;
+ else if (mac_len != 6) {
+ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+ mac_len);
+ return -EINVAL;
+ }
mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
@@ -1363,17 +1371,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- /*
- * Some older boxes running PHYP non-natively have an OF that returns
- * a 8-byte local-mac-address field (and the first 2 bytes have to be
- * ignored) while newer boxes' OF return a 6-byte field. Note that
- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
- * The RPA doc specifies that the first byte must be 10b, so we'll
- * just look for it to solve this 8 vs. 6 byte field issue
- */
- if ((*mac_addr_p & 0x3) != 0x02)
- mac_addr_p += 2;
-
adapter->mac_addr = 0;
memcpy(&adapter->mac_addr, mac_addr_p, 6);
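Note: the hunk above replaces the old first-byte heuristic with an explicit length check, since an 8-byte VETH_MAC_ADDR from old/broken pHyp carries 2 bytes of padding before the 6-byte MAC. A minimal userspace sketch of the same validation (hypothetical helper name, not part of the driver):

    #include <errno.h>
    #include <string.h>

    /* Validate a VETH_MAC_ADDR attribute and copy out the 6-byte MAC.
     * Old/broken pHyp returns 8 bytes (2 bytes of padding first); any
     * length other than 6 or 8 is rejected, as in the hunk above.
     */
    int parse_veth_mac(const unsigned char *attr, int len,
                       unsigned char mac[6])
    {
            if (len == 8)
                    attr += 2;      /* skip the 2 padding bytes */
            else if (len != 6)
                    return -EINVAL; /* unexpected attribute length */
            memcpy(mac, attr, 6);
            return 0;
    }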
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index b477fa53ec9..065f8c80d4f 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -145,8 +145,7 @@ static int e1000e_phc_settime(struct ptp_clock_info *ptp,
unsigned long flags;
u64 ns;
- ns = ts->tv_sec * NSEC_PER_SEC;
- ns += ts->tv_nsec;
+ ns = timespec_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->systim_lock, flags);
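Note: timespec_to_ns() performs the seconds-to-nanoseconds multiply in 64 bits, which the open-coded version could overflow when tv_sec is a 32-bit type. A standalone sketch of the equivalent conversion, assuming a POSIX struct timespec:

    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000LL

    /* Widen tv_sec before the multiply so the product is computed in
     * 64 bits, mirroring the kernel's timespec_to_ns().
     */
    int64_t timespec_to_ns_sketch(const struct timespec *ts)
    {
            return (int64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
    }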
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 79f4a26ea6c..a892efd9c76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2405,6 +2405,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
* with the write to EICR.
*/
eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+
+ /* The lower 16 bits of the EICR register are for the queue interrupts
+ * which should be masked here so that they are not accidentally cleared if
+ * the bits are high when ixgbe_msix_other is called. There is a race
+ * condition otherwise which results in possible performance loss,
+ * especially if the ixgbe_msix_other interrupt is triggering
+ * consistently (as it would when PPS is turned on for the X540 device).
+ */
+ eicr &= 0xFFFF0000;
+
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
if (eicr & IXGBE_EICR_LSC)
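Note: EICR is a write-1-to-clear register, so the handler must only write back the causes it owns; masking off the low 16 queue bits keeps the queue interrupts pending for their own handlers. A hedged sketch against a fake register (the register model and names are illustrative):

    #include <stdint.h>

    static uint32_t fake_eicr = 0x0001ABCDu; /* stand-in for EICR */

    static uint32_t reg_read(void) { return fake_eicr; }
    /* Write-1-to-clear: writing a set bit clears that cause. */
    static void reg_write(uint32_t v) { fake_eicr &= ~v; }

    /* Acknowledge only non-queue causes: mask off the low 16 queue
     * bits before the write-to-clear, as the hunk does with
     * eicr &= 0xFFFF0000.
     */
    void ack_other_causes(void)
    {
            uint32_t eicr = reg_read();

            eicr &= 0xFFFF0000u;
            reg_write(eicr);
    }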
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4ecbe64a758..15ba8c47d79 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5787,6 +5787,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
+ /* 8168evl does not automatically pad to minimum length. */
+ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+ skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto err_update_stats;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
@@ -5858,6 +5866,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
+err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
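Note: skb_padto() zero-fills the frame up to ETH_ZLEN (60 bytes, the minimum Ethernet frame length without FCS) and skb_put() then extends the recorded length to match, because the 8168evl does not pad in hardware. A userspace sketch of the same padding rule over a plain byte buffer:

    #include <stddef.h>
    #include <string.h>

    #define ETH_ZLEN 60 /* minimum Ethernet frame length without FCS */

    /* Pad a frame to ETH_ZLEN with zeros and return the new length, or
     * 0 if the buffer is too small (the driver path instead drops the
     * skb and bumps tx_dropped).
     */
    size_t pad_to_min_len(unsigned char *buf, size_t len, size_t cap)
    {
            if (len >= ETH_ZLEN)
                    return len;
            if (cap < ETH_ZLEN)
                    return 0;
            memset(buf + len, 0, ETH_ZLEN - len);
            return ETH_ZLEN;
    }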
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 0095ce95150..97dd8f18c00 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -667,7 +667,7 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
size_t outlen, offset, i;
int port_num = efx_port_num(efx);
int rc;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 66e025ad5df..f3c2d034b32 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
if (info->has_iqueue) {
gxio_mpipe_request_notif_ring_interrupt(
&context, cpu_x(cpu), cpu_y(cpu),
- 1, ingress_irq, info->iqueue.ring);
+ KERNEL_PL, ingress_irq, info->iqueue.ring);
}
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 73abbc1655d..011062ed595 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -222,7 +222,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
}
if (port->passthru)
- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+ vlan = list_first_or_null_rcu(&port->vlans,
+ struct macvlan_dev, list);
else
vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
@@ -807,7 +808,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
goto upper_dev_unlink;
- list_add_tail(&vlan->list, &port->vlans);
+ list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
@@ -835,7 +836,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- list_del(&vlan->list);
+ list_del_rcu(&vlan->list);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
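Note: in passthru mode the vlans list may legitimately be empty while a reader traverses it under RCU, so the blind list_first_entry() becomes the null-returning variant, paired with the _rcu list mutators. A simplified sketch of the "first or null" check on a circular list with a sentinel head (plain loads stand in for the RCU primitives):

    #include <stddef.h>

    struct list_head {
            struct list_head *next;
    };

    /* Return the first element, or NULL if the list is empty. An empty
     * circular list points back at its own sentinel, which must never
     * be handed to the caller; list_first_or_null_rcu() performs the
     * same check using rcu_dereference() instead of a plain load.
     */
    struct list_head *first_or_null(struct list_head *head)
    {
            struct list_head *n = head->next;

            return (n == head) ? NULL : n;
    }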
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd76fb..f3cdf64997d 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
if (dev == NULL)
return;
+ list_del(&dev->list);
+
ndev = dev->ndev;
unregister_netdev(ndev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index f178d29db01..f7787008b56 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1471,14 +1471,17 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
- return -EINVAL;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
+out:
tun_put(tun);
return ret;
}
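Note: the old early return leaked the reference taken by tun_get(); funneling the error through the out label guarantees tun_put() runs on every path. The single-exit cleanup pattern, sketched with hypothetical get/put helpers:

    #include <errno.h>

    struct obj { int refcnt; };

    static void obj_get(struct obj *o) { o->refcnt++; }
    static void obj_put(struct obj *o) { o->refcnt--; }

    /* Every path after obj_get() funnels through the out label, so the
     * reference taken at the top is always dropped. The flag mask is
     * illustrative only.
     */
    int do_op(struct obj *o, int flags)
    {
            int ret = 0;

            obj_get(o);
            if (flags & ~0x3) { /* reject unsupported flags */
                    ret = -EINVAL;
                    goto out;
            }
            /* ... the real work would go here ... */
    out:
            obj_put(o);
            return ret;
    }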
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f7f623a5390..577c72d5f36 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
rx->size);
kfree_skb(rx->ax_skb);
+ rx->ax_skb = NULL;
+ rx->size = 0U;
+
return 0;
}
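Note: after kfree_skb() the rx->ax_skb pointer dangles, and because the fixup state persists across URBs the stale pointer could later be freed or dereferenced again; nulling it and zeroing the expected size makes the error path safe to re-enter. The generic free-and-reset idiom in userspace:

    #include <stdlib.h>

    struct rx_state {
            void *buf;          /* partially reassembled packet */
            unsigned int size;  /* bytes still expected */
    };

    /* Drop the partial packet and reset the state so a later call can
     * neither double-free buf nor trust a stale size.
     */
    void rx_state_reset(struct rx_state *rx)
    {
            free(rx->buf);
            rx->buf = NULL;
            rx->size = 0U;
    }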
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 57136dc1b88..299c53ba4e0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -615,6 +615,13 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2a3579f6791..a7cafe4a139 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -496,6 +496,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5804 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* ADU960S */
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
USB_CLASS_COMM,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index f76c3ca07a4..21fa26735d8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
{
int i;
- if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 54ba42f4108..874f6570bd1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -68,13 +68,16 @@
#define AR9300_BASE_ADDR 0x3ff
#define AR9300_BASE_ADDR_512 0x1ff
-#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
-#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
+#define AR9300_OTP_BASE \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
-#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
+#define AR9300_OTP_READ_DATA \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 712f415b8c0..88ff1d7b53a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3714b971d18..f25a3200d96 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -2003,6 +2003,14 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
WARN_ON(i != ATH9K_SSTATS_LEN);
}
+void ath9k_deinit_debug(struct ath_softc *sc)
+{
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
+}
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 410d6d8f1aa..f939457f807 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -302,6 +302,7 @@ struct ath9k_debug {
};
int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_deinit_debug(struct ath_softc *sc);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
@@ -337,6 +338,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
return 0;
}
+static inline void ath9k_deinit_debug(struct ath_softc *sc)
+{
+}
+
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index af932c9444d..26db547a620 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -885,7 +885,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
if (!ath_is_world_regd(reg)) {
error = regulatory_hint(hw->wiphy, reg->alpha2);
if (error)
- goto unregister;
+ goto debug_cleanup;
}
ath_init_leds(sc);
@@ -893,6 +893,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
return 0;
+debug_cleanup:
+ ath9k_deinit_debug(sc);
unregister:
ieee80211_unregister_hw(hw);
rx_cleanup:
@@ -921,11 +923,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
sc->dfs_detector->exit(sc->dfs_detector);
ath9k_eeprom_release(sc);
-
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
- }
}
void ath9k_deinit_device(struct ath_softc *sc)
@@ -939,6 +936,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
ath9k_ps_restore(sc);
+ ath9k_deinit_debug(sc);
ieee80211_unregister_hw(hw);
ath_rx_cleanup(sc);
ath9k_deinit_softc(sc);
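Note: the fix threads a debug_cleanup label into the existing unwind ladder so the relay channel is closed when regulatory_hint() fails, and teardown now closes it before ieee80211_unregister_hw() instead of after. The reverse-order unwind ladder in miniature, with hypothetical setup steps:

    /* Each label undoes exactly the steps that succeeded before the
     * failure, in reverse order of setup.
     */
    static int setup_a(void) { return 0; }
    static void undo_a(void) { }
    static int setup_b(void) { return 0; }
    static void undo_b(void) { }
    static int setup_c(void) { return -1; } /* pretend this step fails */

    int init_device(void)
    {
            int err;

            err = setup_a();
            if (err)
                    goto err_out;
            err = setup_b();
            if (err)
                    goto err_undo_a;
            err = setup_c();
            if (err)
                    goto err_undo_b;
            return 0;

    err_undo_b:
            undo_b();
    err_undo_a:
            undo_a();
    err_out:
            return err;
    }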
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 988372d218a..e509c370f83 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1308,6 +1308,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { };
+ int key;
ath_node_attach(sc, sta, vif);
@@ -1315,7 +1316,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
vif->type != NL80211_IFTYPE_AP_VLAN)
return 0;
- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
+ key = ath_key_config(common, vif, sta, &ps_key);
+ if (key > 0)
+ an->ps_key = key;
return 0;
}
@@ -1332,6 +1335,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
return;
ath_key_delete(common, &ps_key);
+ an->ps_key = 0;
}
static int ath9k_sta_remove(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 122146943bf..ee3d6403c79 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1733,6 +1733,25 @@ drop_recycle_buffer:
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
+{
+ int current_slot, previous_slot;
+
+ B43_WARN_ON(ring->tx);
+
+ /* The device has filled all buffers; drop all packets and let TCP
+ * decrease its speed.
+ * Decrementing the RX index by one lets the device see all slots
+ * as free again.
+ */
+ /*
+ * TODO: How to increase rx_drop in mac80211?
+ */
+ current_slot = ring->ops->get_current_rxslot(ring);
+ previous_slot = prev_slot(ring, current_slot);
+ ring->ops->set_current_rxslot(ring, previous_slot);
+}
+
void b43_dma_rx(struct b43_dmaring *ring)
{
const struct b43_dma_ops *ops = ring->ops;
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 9fdd1983079..df8c8cdcbdb 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -9,7 +9,7 @@
/* DMA-Interrupt reasons. */
#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
| (1 << 14) | (1 << 15))
-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
+#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
#define B43_DMAIRQ_RX_DONE (1 << 16)
/*** 32-bit DMA Engine. ***/
@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
+
void b43_dma_rx(struct b43_dmaring *ring);
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 05682736e46..64b637a8b5b 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1895,30 +1895,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
}
}
- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
- B43_DMAIRQ_NONFATALMASK))) {
- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
- b43err(dev->wl, "Fatal DMA error: "
- "0x%08X, 0x%08X, 0x%08X, "
- "0x%08X, 0x%08X, 0x%08X\n",
- dma_reason[0], dma_reason[1],
- dma_reason[2], dma_reason[3],
- dma_reason[4], dma_reason[5]);
- b43err(dev->wl, "This device does not support DMA "
+ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
+ b43err(dev->wl,
+ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+ dma_reason[0], dma_reason[1],
+ dma_reason[2], dma_reason[3],
+ dma_reason[4], dma_reason[5]);
+ b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
- /* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = true;
- b43_controller_restart(dev, "DMA error");
- return;
- }
- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
- b43err(dev->wl, "DMA error: "
- "0x%08X, 0x%08X, 0x%08X, "
- "0x%08X, 0x%08X, 0x%08X\n",
- dma_reason[0], dma_reason[1],
- dma_reason[2], dma_reason[3],
- dma_reason[4], dma_reason[5]);
- }
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = true;
+ b43_controller_restart(dev, "DMA error");
+ return;
}
if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
@@ -1937,6 +1925,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
handle_irq_noise(dev);
/* Check the DMA reason registers for received data. */
+ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
+ if (B43_DEBUG)
+ b43warn(dev->wl, "RX descriptor underrun\n");
+ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
+ }
if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
if (b43_using_pio_transfers(dev))
b43_pio_rx(dev->pio.rx_queue);
@@ -1994,7 +1987,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
return IRQ_NONE;
dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
- & 0x0001DC00;
+ & 0x0001FC00;
dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
& 0x0000DC00;
dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
@@ -3126,7 +3119,7 @@ static int b43_chip_init(struct b43_wldev *dev)
b43_write32(dev, 0x018C, 0x02000000);
}
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
+ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
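Note: on a receive-descriptor underflow the new handler rewinds the device's current RX slot by one, which makes every slot look free to the hardware again. The wrap-around previous-slot arithmetic, sketched for a ring of nr_slots entries:

    /* Previous slot in a ring of nr_slots entries, wrapping at zero.
     * Adding nr_slots before the modulo keeps the operands
     * non-negative; e.g. prev_slot_sketch(0, 64) == 63.
     */
    int prev_slot_sketch(int slot, int nr_slots)
    {
            return (slot + nr_slots - 1) % nr_slots;
    }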
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7941eb3a016..cbaa777dc03 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5740,8 +5740,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 20806cae11b..81d4071130c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2237,15 +2237,15 @@ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -ENOMEM;
+ char *buf = NULL;
+ ssize_t ret;
- ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- }
+ ret = iwl_dump_nic_event_log(priv, true, &buf, true);
+ if (ret < 0)
+ goto err;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+err:
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b775769f832..c3c13ce96eb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -695,6 +695,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
struct iwl_addsta_cmd sta_cmd;
+ static const struct iwl_link_quality_cmd zero_lq = {};
struct iwl_link_quality_cmd lq;
int i;
bool found = false;
@@ -733,7 +734,9 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
memcpy(&lq, priv->stations[i].lq,
sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
+
+ if (memcmp(&lq, &zero_lq, sizeof(lq)))
+ send_lq = true;
}
spin_unlock_bh(&priv->sta_lock);
ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 2adb61f103f..44643823a10 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -165,6 +165,8 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ MCAST_FILTER_CMD = 0xd0,
+
/* D3 commands/notifications */
D3_CONFIG_CMD = 0xd3,
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
@@ -951,4 +953,29 @@ struct iwl_set_calib_default_cmd {
u8 data[0];
} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
+#define MAX_PORT_ID_NUM 2
+
+/**
+ * struct iwl_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by the station itself
+ * @port_id: Multicast MAC address array specifier. This is an unusual way
+ *	to identify the network interface, adopted in the host-device
+ *	interface. The FW uses it as an index into an array of addresses;
+ *	that array has MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Placeholder for the array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+ u8 filter_own;
+ u8 port_id;
+ u8 count;
+ u8 pass_all;
+ u8 bssid[6];
+ u8 reserved[2];
+ u8 addr_list[0];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
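Note: addr_list[0] is a zero-length trailing array, so the command occupies the 12-byte fixed header plus count 6-byte addresses, padded up to DWORD alignment as the kernel-doc demands. A sketch of the sizing arithmetic with a mirror struct (the helper is hypothetical):

    #include <stddef.h>

    #define ETH_ALEN 6

    struct mcast_cmd_hdr {
            unsigned char filter_own, port_id, count, pass_all;
            unsigned char bssid[ETH_ALEN];
            unsigned char reserved[2];
            /* addr_list[] follows */
    };

    /* Fixed header plus count MAC addresses, rounded up to a 4-byte
     * (DWORD) boundary.
     */
    size_t mcast_cmd_len(unsigned int count)
    {
            size_t len = sizeof(struct mcast_cmd_hdr) + count * ETH_ALEN;

            return (len + 3) & ~(size_t)3;
    }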
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 341dbc0237e..bf76b17b257 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
*/
static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_data_sta *ctxt_sta)
+ struct iwl_mac_data_sta *ctxt_sta,
+ bool force_assoc_off)
{
/* We need the dtim_period to set the MAC as associated */
- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+ !force_assoc_off) {
u32 dtim_offs;
/*
@@ -652,7 +654,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
/* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
+ action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -669,7 +672,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
/* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+ iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
+ action == FW_CTXT_ACTION_ADD);
cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7e169b085af..8572358e568 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -82,15 +82,6 @@ static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
.types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
},
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
};
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
@@ -136,10 +127,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_AP);
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -657,6 +645,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
*total_flags = 0;
}
+static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mcast_filter_cmd mcast_filter_cmd = {
+ .pass_all = 1,
+ };
+
+ memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(mcast_filter_cmd),
+ &mcast_filter_cmd);
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -677,6 +679,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
+ iwl_mvm_configure_mcast_filter(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -886,7 +889,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
switch (cmd) {
case STA_NOTIFY_SLEEP:
- if (atomic_read(&mvmsta->pending_frames) > 0)
+ if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index bdae700c769..dc59ef56880 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -293,6 +293,7 @@ struct iwl_mvm {
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+ atomic_t pending_frames[IWL_MVM_STATION_COUNT];
/* configured by mac80211 */
u32 rts_threshold;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d0f9c1e0475..ddac83322d2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -293,6 +293,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(NET_DETECT_PROFILES_CMD),
CMD(NET_DETECT_HOTSPOTS_CMD),
CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+ CMD(MCAST_FILTER_CMD),
};
#undef CMD
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9b21b92aa8d..5c52faa54b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
else
cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+ /*
+ * TODO: This is a workaround for a bug in the FW AUX framework that does
+ * not properly handle time events that fail to be scheduled.
+ */
+ cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+
cmd->repeats = cpu_to_le32(1);
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 274f44e2ef6..7b8644e75a1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -172,7 +172,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
/* HW restart, don't assume the memory has been zeroed */
- atomic_set(&mvm_sta->pending_frames, 0);
+ atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0;
mvm_sta->tfd_queue_msk = 0;
for (i = 0; i < IEEE80211_NUM_ACS; i++)
@@ -360,14 +360,21 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
}
/*
+ * Make sure that the tx response code sees the station as -EBUSY and
+ * calls the drain worker.
+ */
+ spin_lock_bh(&mvm_sta->lock);
+ /*
* There are frames pending on the AC queues for this station.
* We need to wait until all the frames are drained...
*/
- if (atomic_read(&mvm_sta->pending_frames)) {
- ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EBUSY));
+ spin_unlock_bh(&mvm_sta->lock);
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else {
+ spin_unlock_bh(&mvm_sta->lock);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 896f88ac814..2dbf7ba7469 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -273,7 +273,6 @@ struct iwl_mvm_tid_data {
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
- * @pending_frames: number of frames for this STA on the shared Tx queues.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
@@ -288,7 +287,6 @@ struct iwl_mvm_sta {
u16 tid_disable_agg;
u8 max_agg_bufsize;
spinlock_t lock;
- atomic_t pending_frames;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6645efe5c03..44f26f475f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
- txq_id < IWL_FIRST_AMPDU_QUEUE)
- atomic_inc(&mvmsta->pending_frames);
+ if (txq_id < IWL_FIRST_AMPDU_QUEUE)
+ atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -678,16 +677,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
/*
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
- * If there are no pending frames for this STA, notify mac80211 that
- * this station can go to sleep in its STA table.
*/
- if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta &&
- !WARN_ON(skb_freed > 1) &&
- mvmsta->vif->type == NL80211_IFTYPE_AP &&
- atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
+ if (txq_id < IWL_FIRST_AMPDU_QUEUE && !WARN_ON(skb_freed > 1) &&
+ atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
+ if (mvmsta) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ */
+ if (mvmsta->vif->type == NL80211_IFTYPE_AP)
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ /*
+ * We might very well have taken the mvmsta pointer while
+ * the station was being removed. The remove flow might
+ * have seen a pending_frame (because we didn't take
+ * the lock) even if the queues are now drained. So make
+ * really sure that the station is not being
+ * removed. If it is, run the drain worker to remove it.
+ */
+ spin_lock_bh(&mvmsta->lock);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ /*
+ * The station disappeared in the meantime,
+ * so we are draining.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+ } else if (!mvmsta) {
+ /* Tx response without STA, so we are draining */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
}
rcu_read_unlock();
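Note: moving pending_frames from the station struct into a per-station-ID array owned by the mvm keeps the counter valid while the station object is being torn down, and atomic_sub_and_test() triggers the drain logic exactly when the count hits zero. A C11 sketch of decrement-and-test driving a drain decision (all names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define MAX_STA 8

    static atomic_int pending_frames[MAX_STA]; /* indexed by station ID */

    /* True when this completion drained the last pending frame for the
     * station, i.e. the moment the drain worker should be scheduled.
     * atomic_fetch_sub() returns the previous value, so previous ==
     * freed means the counter just reached zero.
     */
    bool tx_complete(int sta_id, int freed)
    {
            return atomic_fetch_sub(&pending_frames[sta_id], freed) == freed;
    }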
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cffdf4fbf16..2b49f48d8d8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2118,7 +2118,6 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
static struct ieee80211_iface_combination hwsim_if_comb = {
@@ -2230,8 +2229,7 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_MESH_POINT);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8aaf56ade4d..c13f6e9e52f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2280,9 +2280,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (wdev->netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(wdev->netdev);
- if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
- free_netdev(wdev->netdev);
-
/* Clear the priv in adapter */
priv->netdev = NULL;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index b5c8b962ce1..aeade10b9d5 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1176,6 +1176,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
adapter->is_hs_configured = false;
+ adapter->is_suspended = false;
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c802ede9c3..6d9bc63d6dd 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -646,6 +646,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
struct net_device *dev)
{
dev->netdev_ops = &mwifiex_netdev_ops;
+ dev->destructor = free_netdev;
/* Initialize private structure */
priv->current_key_index = 0;
priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index feb20461339..f024fb072dd 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -2281,9 +2281,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
if (pdev) {
pci_iounmap(pdev, card->pci_mmap);
pci_iounmap(pdev, card->pci_mmap1);
-
- pci_release_regions(pdev);
pci_disable_device(pdev);
+ pci_release_region(pdev, 2);
+ pci_release_region(pdev, 0);
pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 13100f8de3d..fb420fe0ec0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -99,7 +99,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
} else {
/* Multicast */
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
+ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
dev_dbg(priv->adapter->dev,
"info: Enabling All Multicast!\n");
priv->curr_pkt_filter |=
@@ -111,20 +111,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev,
"info: Set multicast list=%d\n",
mcast_list->num_multicast_addr);
- /* Set multicast addresses to firmware */
- if (old_pkt_filter == priv->curr_pkt_filter) {
- /* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
- } else {
- /* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
- }
+ /* Send multicast addresses to firmware */
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_MAC_MULTICAST_ADR,
+ HostCmd_ACT_GEN_SET, 0,
+ mcast_list);
}
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a658b4bc7da..92849e5cf65 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -4436,6 +4436,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392)) {
+ u8 min_gain = rt2x00_rt(rt2x00dev, RT3070) ? 1 : 2;
+
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -4446,8 +4448,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
&rt2x00dev->cap_flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
- rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
- drv_data->txmixer_gain_24g);
+ if (drv_data->txmixer_gain_24g >= min_gain) {
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ drv_data->txmixer_gain_24g);
+ }
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cd49ba94963..8099e9d3edd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,11 +47,33 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
+/*
+ * This is the maximum number of slots an skb can have. If a guest sends
+ * an skb which exceeds this limit, it is considered malicious.
+ */
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum number of slots a valid packet can use. Currently this
+ * value is defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to
+ * be supported by all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
struct pending_tx_info {
- struct xen_netif_tx_request req;
+ struct xen_netif_tx_request req; /* coalesced tx request */
struct xenvif *vif;
+ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+ * if it is the head of one or more
+ * tx reqs
+ */
};
-typedef unsigned int pending_ring_idx_t;
struct netbk_rx_meta {
int id;
@@ -102,7 +124,11 @@ struct xen_netbk {
atomic_t netfront_count;
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+ /* Coalescing tx requests before copying makes the number of grant
+ * copy ops greater than or equal to the number of slots required. In
+ * the worst case a tx request consumes 2 gnttab_copy ops.
+ */
+ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
u16 pending_ring[MAX_PENDING_REQS];
@@ -118,6 +144,16 @@ struct xen_netbk {
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
+/*
+ * If head != INVALID_PENDING_RING_IDX, this tx request is the head of
+ * one or more merged tx requests; otherwise it is the continuation of
+ * a previous tx request.
+ */
+static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
+{
+ return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
+}
+
void xen_netbk_add_xenvif(struct xenvif *vif)
{
int i;
@@ -250,6 +286,7 @@ static int max_required_rx_slots(struct xenvif *vif)
{
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
if (vif->can_sg || vif->gso || vif->gso_prefix)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
@@ -657,6 +694,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
__skb_queue_tail(&rxq, skb);
/* Filled the batch queue? */
+ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
break;
}
@@ -902,47 +940,99 @@ static int netbk_count_requests(struct xenvif *vif,
int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
- int frags = 0;
+ int slots = 0;
+ int drop_err = 0;
+ int more_data;
if (!(first->flags & XEN_NETTXF_more_data))
return 0;
do {
- if (frags >= work_to_do) {
- netdev_err(vif->dev, "Need more frags\n");
+ struct xen_netif_tx_request dropped_tx = { 0 };
+
+ if (slots >= work_to_do) {
+ netdev_err(vif->dev,
+ "Asked for %d slots but exceeds this limit\n",
+ work_to_do);
netbk_fatal_tx_err(vif);
return -ENODATA;
}
- if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_err(vif->dev, "Too many frags\n");
+ /* This guest is using far too many slots and is
+ * considered malicious.
+ */
+ if (unlikely(slots >= fatal_skb_slots)) {
+ netdev_err(vif->dev,
+ "Malicious frontend using %d slots, threshold %u\n",
+ slots, fatal_skb_slots);
netbk_fatal_tx_err(vif);
return -E2BIG;
}
- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ /* The Xen network protocol had an implicit dependency on
+ * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+ * the historical MAX_SKB_FRAGS value 18 to honor the
+ * same behavior as before. Any packet using more than
+ * 18 slots but fewer than fatal_skb_slots slots is
+ * dropped.
+ */
+ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Too many slots (%d) exceeding limit (%d), dropping packet\n",
+ slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+ drop_err = -E2BIG;
+ }
+
+ if (drop_err)
+ txp = &dropped_tx;
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
sizeof(*txp));
- if (txp->size > first->size) {
- netdev_err(vif->dev, "Frag is bigger than frame.\n");
- netbk_fatal_tx_err(vif);
- return -EIO;
+
+ /* If the guest submitted a frame >= 64 KiB then
+ * first->size overflowed and the following slots will
+ * appear to be larger than the frame.
+ *
+ * This cannot be a fatal error because there are buggy
+ * frontends that do this.
+ *
+ * Consume all slots and drop the packet.
+ */
+ if (!drop_err && txp->size > first->size) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Invalid tx request, slot size %u > remaining size %u\n",
+ txp->size, first->size);
+ drop_err = -EIO;
}
first->size -= txp->size;
- frags++;
+ slots++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
netbk_fatal_tx_err(vif);
return -EINVAL;
}
- } while ((txp++)->flags & XEN_NETTXF_more_data);
- return frags;
+
+ more_data = txp->flags & XEN_NETTXF_more_data;
+
+ if (!drop_err)
+ txp++;
+
+ } while (more_data);
+
+ if (drop_err) {
+ netbk_tx_err(vif, first, cons + slots);
+ return drop_err;
+ }
+
+ return slots;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
- struct sk_buff *skb,
u16 pending_idx)
{
struct page *page;
@@ -963,48 +1053,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = *((u16 *)skb->data);
- int i, start;
+ u16 head_idx = 0;
+ int slot, start;
+ struct page *page;
+ pending_ring_idx_t index, start_idx = 0;
+ uint16_t dst_offset;
+ unsigned int nr_slots;
+ struct pending_tx_info *first = NULL;
+
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- for (i = start; i < shinfo->nr_frags; i++, txp++) {
- struct page *page;
- pending_ring_idx_t index;
+ /* Coalesce tx requests; at this point the packet passed in
+ * should be <= 64K. Any packet larger than 64K has been
+ * handled in netbk_count_requests().
+ */
+ for (shinfo->nr_frags = slot = start; slot < nr_slots;
+ shinfo->nr_frags++) {
struct pending_tx_info *pending_tx_info =
netbk->pending_tx_info;
- index = pending_index(netbk->pending_cons++);
- pending_idx = netbk->pending_ring[index];
- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ page = alloc_page(GFP_KERNEL|__GFP_COLD);
if (!page)
goto err;
- gop->source.u.ref = txp->gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txp->offset;
-
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
- gop->dest.domid = DOMID_SELF;
- gop->dest.offset = txp->offset;
-
- gop->len = txp->size;
- gop->flags = GNTCOPY_source_gref;
+ dst_offset = 0;
+ first = NULL;
+ while (dst_offset < PAGE_SIZE && slot < nr_slots) {
+ gop->flags = GNTCOPY_source_gref;
+
+ gop->source.u.ref = txp->gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txp->offset;
+
+ gop->dest.domid = DOMID_SELF;
+
+ gop->dest.offset = dst_offset;
+ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+
+ if (dst_offset + txp->size > PAGE_SIZE) {
+ /* This page can only merge a portion
+ * of the tx request. Do not increment
+ * any pointer / counter here. The txp
+ * will be dealt with in future
+ * rounds, eventually hitting the
+ * `else` branch.
+ */
+ gop->len = PAGE_SIZE - dst_offset;
+ txp->offset += gop->len;
+ txp->size -= gop->len;
+ dst_offset += gop->len; /* quit loop */
+ } else {
+ /* This tx request can be merged in the page */
+ gop->len = txp->size;
+ dst_offset += gop->len;
+
+ index = pending_index(netbk->pending_cons++);
+
+ pending_idx = netbk->pending_ring[index];
+
+ memcpy(&pending_tx_info[pending_idx].req, txp,
+ sizeof(*txp));
+ xenvif_get(vif);
+
+ pending_tx_info[pending_idx].vif = vif;
+
+ /* Poison these fields; the corresponding
+ * fields for the head tx req will be set
+ * to the correct values after the loop.
+ */
+ netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+ pending_tx_info[pending_idx].head =
+ INVALID_PENDING_RING_IDX;
+
+ if (!first) {
+ first = &pending_tx_info[pending_idx];
+ start_idx = index;
+ head_idx = pending_idx;
+ }
+
+ txp++;
+ slot++;
+ }
- gop++;
+ gop++;
+ }
- memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
- xenvif_get(vif);
- pending_tx_info[pending_idx].vif = vif;
- frag_set_pending_idx(&frags[i], pending_idx);
+ first->req.offset = 0;
+ first->req.size = dst_offset;
+ first->head = start_idx;
+ set_page_ext(page, netbk, head_idx);
+ netbk->mmap_pages[head_idx] = page;
+ frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
}
+ BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
+
return gop;
err:
/* Unwind, freeing all pages and sending error responses. */
- while (i-- > start) {
- xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
- XEN_NETIF_RSP_ERROR);
+ while (shinfo->nr_frags-- > start) {
+ xen_netbk_idx_release(netbk,
+ frag_get_pending_idx(&frags[shinfo->nr_frags]),
+ XEN_NETIF_RSP_ERROR);
}
/* The head too, if necessary. */
if (start)
@@ -1020,8 +1176,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct pending_tx_info *tx_info;
int nr_frags = shinfo->nr_frags;
int i, err, start;
+ u16 peek; /* peek into next tx request */
/* Check status of header. */
err = gop->status;
@@ -1033,11 +1191,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
for (i = start; i < nr_frags; i++) {
int j, newerr;
+ pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ tx_info = &netbk->pending_tx_info[pending_idx];
+ head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
- newerr = (++gop)->status;
+ do {
+ newerr = (++gop)->status;
+ if (newerr)
+ break;
+ peek = netbk->pending_ring[pending_index(++head)];
+ } while (!pending_tx_is_head(netbk, peek));
+
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
@@ -1262,11 +1429,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
struct sk_buff *skb;
int ret;
- while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+ struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
@@ -1354,7 +1522,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
- ret < MAX_SKB_FRAGS) ?
+ ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1381,7 +1549,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
}
/* XXX could copy straight to head */
- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ page = xen_netbk_alloc_page(netbk, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
@@ -1404,6 +1572,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
memcpy(&netbk->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
netbk->pending_tx_info[pending_idx].vif = vif;
+ netbk->pending_tx_info[pending_idx].head = index;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
@@ -1531,7 +1700,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t index;
+ pending_ring_idx_t head;
+ u16 peek; /* peek into next tx request */
+
+ BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
/* Already complete? */
if (netbk->mmap_pages[pending_idx] == NULL)
@@ -1540,19 +1712,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
pending_tx_info = &netbk->pending_tx_info[pending_idx];
vif = pending_tx_info->vif;
+ head = pending_tx_info->head;
+
+ BUG_ON(!pending_tx_is_head(netbk, head));
+ BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
- make_tx_response(vif, &pending_tx_info->req, status);
+ do {
+ pending_ring_idx_t index;
+ pending_ring_idx_t idx = pending_index(head);
+ u16 info_idx = netbk->pending_ring[idx];
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
+ pending_tx_info = &netbk->pending_tx_info[info_idx];
+ make_tx_response(vif, &pending_tx_info->req, status);
- xenvif_put(vif);
+ /* Setting any number other than
+ * INVALID_PENDING_RING_IDX indicates this slot is
+ * starting a new packet / ending a previous packet.
+ */
+ pending_tx_info->head = 0;
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = netbk->pending_ring[info_idx];
+
+ xenvif_put(vif);
+
+ peek = netbk->pending_ring[pending_index(++head)];
+
+ } while (!pending_tx_is_head(netbk, peek));
netbk->mmap_pages[pending_idx]->mapping = 0;
put_page(netbk->mmap_pages[pending_idx]);
netbk->mmap_pages[pending_idx] = NULL;
}
+
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st)
@@ -1605,8 +1798,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
static inline int tx_work_todo(struct xen_netbk *netbk)
{
- if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list))
+ if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list))
return 1;
return 0;
@@ -1689,6 +1883,13 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
+ if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
+ printk(KERN_INFO
+ "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+ fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+ fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
+ }
+
xen_netbk_group_nr = num_online_cpus();
xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
if (!xen_netbk)
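Note: the coalescing loop packs up to PAGE_SIZE bytes of guest data into each freshly allocated page and splits any request that straddles a page boundary; that split is why one request needs at most two grant copies, matching the 2*MAX_PENDING_REQS sizing of tx_copy_ops. A simplified userspace sketch of the packing loop, counting copy operations (it assumes n >= 1 and that each chunk fits a page, which the per-slot offset/size check enforces):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Pack chunks into pages, splitting a chunk that crosses a page
     * boundary. Each inner step is one "copy op"; a chunk is consumed
     * by at most two ops.
     */
    int count_copy_ops(const unsigned int *sizes, int n)
    {
            int ops = 0, i = 0;
            unsigned int remaining = sizes[0], dst = 0;

            while (i < n) {
                    unsigned int len = remaining;

                    if (dst + len > PAGE_SIZE)
                            len = PAGE_SIZE - dst; /* split at page edge */
                    ops++;
                    dst += len;
                    remaining -= len;
                    if (remaining == 0 && ++i < n)
                            remaining = sizes[i];
                    if (dst == PAGE_SIZE)
                            dst = 0; /* start a new page */
            }
            return ops;
    }

    int main(void)
    {
            unsigned int sizes[] = { 4000, 200, 4096 };

            printf("%d copy ops\n", count_copy_ops(sizes, 3)); /* 5 */
            return 0;
    }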
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7ffa43bd7cf..1f57423c794 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -36,7 +36,7 @@
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
-#include <linux/tcp.h>
+#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
@@ -548,6 +548,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int len = skb_headlen(skb);
unsigned long flags;
+ /* If skb->len is too big for the wire format, drop the skb and
+ * alert the user about the misconfiguration.
+ */
+ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
+ net_alert_ratelimited(
+ "xennet: skb->len = %u, too big for wire format\n",
+ skb->len);
+ goto drop;
+ }
+
slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
xennet_count_skb_frag_slots(skb);
if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
@@ -1064,7 +1074,8 @@ err:
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
- int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+ int max = xennet_can_sg(dev) ?
+ XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
@@ -1368,6 +1379,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
SET_NETDEV_DEV(netdev, &dev->dev);
+ netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
+
np->netdev = netdev;
netif_carrier_off(netdev);
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c9235..2dacd19e1b8 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
*/
void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return NULL;
return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
*/
resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return 0;
return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
*/
void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
{
- if (mw > NTB_NUM_MW)
+ if (mw >= NTB_NUM_MW)
return;
dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ndev->mw[i].vbase =
ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
ndev->mw[i].bar_sz);
- dev_info(&pdev->dev, "MW %d size %d\n", i,
- (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+ dev_info(&pdev->dev, "MW %d size %llu\n", i,
+ pci_resource_len(pdev, MW_TO_BAR(i)));
if (!ndev->mw[i].vbase) {
dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
MW_TO_BAR(i));
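Note: all three hunks above fix the same off-by-one: valid indices into an N-entry array are 0..N-1, so the guard must reject idx == N as well as larger values. Sketched:

    #include <stddef.h>

    #define NUM_MW 2

    static int mw_table[NUM_MW];

    /* Correct bounds check: valid indices are 0 .. NUM_MW-1. The old
     * `idx > NUM_MW` let idx == NUM_MW through, reading one past the
     * end of the array.
     */
    int *mw_lookup(unsigned int idx)
    {
            if (idx >= NUM_MW)
                    return NULL;
            return &mw_table[idx];
    }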
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7f993..f8d7081ee30 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
#include <linux/ntb.h>
#include "ntb_hw.h"
-#define NTB_TRANSPORT_VERSION 2
+#define NTB_TRANSPORT_VERSION 3
static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
enum {
VERSION = 0,
- MW0_SZ,
- MW1_SZ,
- NUM_QPS,
QP_LINKS,
+ NUM_QPS,
+ NUM_MWS,
+ MW0_SZ_HIGH,
+ MW0_SZ_LOW,
+ MW1_SZ_HIGH,
+ MW1_SZ_LOW,
MAX_SPAD,
};
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client_dev;
struct ntb_transport *nt;
- int rc;
+ int rc, i = 0;
if (list_empty(&ntb_transport_list))
return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
dev = &client_dev->dev;
/* setup and register client devices */
- dev_set_name(dev, "%s", device_name);
+ dev_set_name(dev, "%s%d", device_name, i);
dev->bus = &ntb_bus_type;
dev->release = ntb_client_release;
dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
}
list_add_tail(&client_dev->entry, &nt->client_devs);
+ i++;
}
return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
(qp_num / NTB_NUM_MW * rx_size);
rx_size -= sizeof(struct ntb_rx_info);
- qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
- qp->rx_max_frame = min(transport_mtu, rx_size);
+ qp->rx_buff = qp->remote_rx_info + 1;
+ /* Due to housekeeping, there must be at least 2 buffs */
+ qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
- qp->remote_rx_info->entry = qp->rx_max_entry;
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;
/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
qp->rx_pkts = 0;
qp->tx_pkts = 0;
+ qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+ struct ntb_transport_mw *mw = &nt->mw[num_mw];
+ struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+ if (!mw->virt_addr)
+ return;
+
+ dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+ mw->virt_addr = NULL;
}
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
struct ntb_transport_mw *mw = &nt->mw[num_mw];
struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+ /* No need to re-setup */
+ if (mw->size == ALIGN(size, 4096))
+ return 0;
+
+ if (mw->size != 0)
+ ntb_free_mw(nt, num_mw);
+
/* Alloc memory for receiving data. Must be 4k aligned */
mw->size = ALIGN(size, 4096);
mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
GFP_KERNEL);
if (!mw->virt_addr) {
+ mw->size = 0;
dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
(int) mw->size);
return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
u32 val;
int rc, i;
- /* send the local info */
- rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- 0, VERSION);
- goto out;
- }
+ /* send the local info, in the opposite order of the way we read it */
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+ ntb_get_mw_size(ndev, i) >> 32);
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32)(ntb_get_mw_size(ndev, i) >> 32),
+ MW0_SZ_HIGH + (i * 2));
+ goto out;
+ }
- rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
- if (rc) {
- dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
- goto out;
+ rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+ (u32) ntb_get_mw_size(ndev, i));
+ if (rc) {
+ dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+ (u32) ntb_get_mw_size(ndev, i),
+ MW0_SZ_LOW + (i * 2));
+ goto out;
+ }
}
- rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+ rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+ NTB_NUM_MW, NUM_MWS);
goto out;
}
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
goto out;
}
- rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
- goto out;
- }
-
- rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+ rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
if (rc) {
dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
- val, QP_LINKS);
+ NTB_TRANSPORT_VERSION, VERSION);
goto out;
}
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
goto out;
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
- rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+ rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+ dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
goto out;
}
- if (!val)
+ if (val != NTB_NUM_MW)
goto out;
- dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+ dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
- rc = ntb_set_mw(nt, 0, val);
- if (rc)
- goto out;
+ for (i = 0; i < NTB_NUM_MW; i++) {
+ u64 val64;
- rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
- if (rc) {
- dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
- goto out;
- }
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_HIGH + (i * 2));
+ goto out1;
+ }
- if (!val)
- goto out;
- dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+ val64 = (u64) val << 32;
- rc = ntb_set_mw(nt, 1, val);
- if (rc)
- goto out;
+ rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+ if (rc) {
+ dev_err(&pdev->dev, "Error reading remote spad %d\n",
+ MW0_SZ_LOW + (i * 2));
+ goto out1;
+ }
+
+ val64 |= val;
+
+ dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+ rc = ntb_set_mw(nt, i, val64);
+ if (rc)
+ goto out1;
+ }
nt->transport_link = NTB_LINK_UP;
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
return;
+out1:
+ for (i = 0; i < NTB_NUM_MW; i++)
+ ntb_free_mw(nt, i);
out:
if (ntb_hw_link_status(ndev))
schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
(qp_num / NTB_NUM_MW * tx_size);
tx_size -= sizeof(struct ntb_rx_info);
- qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
- qp->tx_max_frame = min(transport_mtu, tx_size);
+ qp->tx_mw = qp->rx_info + 1;
+ /* Due to housekeeping, there must be at least 2 buffers */
+ qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- qp->tx_index = 0;
if (nt->debugfs_dir) {
char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
pdev = ntb_query_pdev(nt->ndev);
for (i = 0; i < NTB_NUM_MW; i++)
- if (nt->mw[i].virt_addr)
- dma_free_coherent(&pdev->dev, nt->mw[i].size,
- nt->mw[i].virt_addr,
- nt->mw[i].dma_addr);
+ ntb_free_mw(nt, i);
kfree(nt->qps);
ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
static void ntb_transport_rx(unsigned long data)
{
struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
- int rc;
+ int rc, i;
- do {
+ /* Limit the number of packets processed in a single interrupt to
+ * provide fairness to others
+ */
+ for (i = 0; i < qp->rx_max_entry; i++) {
rc = ntb_process_rxc(qp);
- } while (!rc);
+ if (rc)
+ break;
+ }
}
static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
struct ntb_queue_entry *entry;
if (!qp)
return;
+ pdev = ntb_query_pdev(qp->ndev);
+
cancel_delayed_work_sync(&qp->link_work);
ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
*/
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
- struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+ struct pci_dev *pdev;
int rc, val;
if (!qp)
return;
+ pdev = ntb_query_pdev(qp->ndev);
qp->client_ready = NTB_LINK_DOWN;
rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
*/
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return false;
+
return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
*/
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
*/
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
+ if (!qp)
+ return 0;
+
return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
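The MW0_SZ_HIGH/LOW scratchpad pairs introduced above exist because each scratchpad is a 32-bit register while a memory-window (BAR) size can exceed 4 GB; protocol version 3 therefore splits every size across two spads, and VERSION is written last so the peer only trusts the other fields once a matching version shows up. A minimal sketch of the split and recombine, assuming only that each scratchpad access moves one u32 (the helper names here are illustrative, not from the driver):

	#include <linux/types.h>

	/* Sender side: publish one 64-bit size via two 32-bit spads. */
	static void mw_size_to_spads(u64 size, u32 *hi, u32 *lo)
	{
		*hi = (u32)(size >> 32);	/* -> MWn_SZ_HIGH */
		*lo = (u32)size;		/* -> MWn_SZ_LOW  */
	}

	/* Receiver side: recombine, as the link-work loop above does. */
	static u64 mw_size_from_spads(u32 hi, u32 lo)
	{
		return ((u64)hi << 32) | lo;
	}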
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8647dc6f52d..f9c61fb802b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -174,6 +174,7 @@ int pci_bus_add_device(struct pci_dev *dev)
* Can not put in pci_device_add yet because resources
* are not assigned yet for some devices.
*/
+ pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
dev->match_driver = true;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b099e0025d2..fc9bd81e8e3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -646,15 +646,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- } else {
+ } else
error = -ENODEV;
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- }
+
+ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ dev->current_state = PCI_D0;
return error;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b494066ef32..563771fcab9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -988,7 +988,6 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->dev.type = &pci_dev_type;
dev->hdr_type = hdr_type & 0x7f;
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
@@ -1208,6 +1207,7 @@ struct pci_dev *alloc_pci_dev(void)
return NULL;
INIT_LIST_HEAD(&dev->bus_list);
+ dev->dev.type = &pci_dev_type;
return dev;
}
@@ -1339,7 +1339,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
- pci_fixup_device(pci_fixup_final, dev);
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index efb7f10e902..b141a28473b 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/machine.h>
@@ -27,8 +28,6 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-#include <asm/mach/irq.h>
-
#include <mach/hardware.h>
#include <mach/at91_pio.h>
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 538b9ddaadf..7265e551ddd 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -23,13 +23,12 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <asm/mach/irq.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 36d20293de5..93eba9715e6 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -33,7 +34,6 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/pinctrl-nomadik.h>
-#include <asm/mach/irq.h>
#include "pinctrl-nomadik.h"
#include "core.h"
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index d02498b30c6..ab26b4b669d 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
@@ -25,7 +26,6 @@
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <asm/mach/irq.h>
#define DRIVER_NAME "pinmux-sirf"
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 295b349a05c..a4908ecd74f 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -15,12 +15,12 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spinlock.h>
-#include <asm/mach/irq.h>
#define MAX_GPIO_PER_REG 32
#define PIN_OFFSET(pin) (pin % MAX_GPIO_PER_REG)
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1a779bbfb87..72184cc49e4 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -679,7 +679,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
}
rfkill_init_sw_state(gps_rfkill,
hp_wmi_get_sw_state(HPWMI_GPS));
- rfkill_set_hw_state(bluetooth_rfkill,
+ rfkill_set_hw_state(gps_rfkill,
hp_wmi_get_hw_state(HPWMI_GPS));
err = rfkill_register(gps_rfkill);
if (err)
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index e64a7a870d4..a8e43cf70fa 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
- return lis3lv02d_poweron(&lis3_dev);
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
}
static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 69a2d9eb34d..3223b57e8f9 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -143,7 +143,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
u32 val;
rc = clk_enable(pc->clk);
- if (!rc)
+ if (rc)
return rc;
val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
@@ -209,12 +209,12 @@ static int spear_pwm_probe(struct platform_device *pdev)
pc->chip.npwm = NUM_PWM;
ret = clk_prepare(pc->clk);
- if (!ret)
+ if (ret)
return ret;
if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
- if (!ret) {
+ if (ret) {
clk_unprepare(pc->clk);
return ret;
}
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 6faba406b6e..a8b2c23a7ef 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
u32 intval;
u32 ch_inte;
+ /* For MSI mode disable all device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI)
+ iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
dev_int = ioread32(priv->regs + TSI721_DEV_INT);
if (!dev_int)
return IRQ_NONE;
@@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
}
}
#endif
+
+ /* For MSI mode re-enable device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI) {
+ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+ }
+
return IRQ_HANDLED;
}
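Message-signalled interrupts behave like edges: a source that asserts while the handler is already running generates no new message, so its event can be lost. The hunk above applies the usual remedy, and a device-neutral skeleton of that pattern looks roughly like the following (intr_enable_reg(), service_pending_events() and ALL_SOURCE_BITS are placeholders, not real tsi721 symbols):

	static irqreturn_t msi_irq_handler(int irq, void *data)
	{
		void __iomem *inte = intr_enable_reg(data);	/* placeholder */

		iowrite32(0, inte);		/* gate device-level sources */
		service_pending_events(data);	/* placeholder: drain status */

		/* Re-arm: rewriting the enable mask makes the device
		 * re-evaluate its status registers and raise a fresh MSI
		 * for anything that arrived while we were in the handler.
		 */
		iowrite32(ALL_SOURCE_BITS, inte);
		return IRQ_HANDLED;
	}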
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 39cf1460678..149fad495ca 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -677,7 +677,7 @@ static int palmas_probe(struct platform_device *pdev)
pmic->desc[id].vsel_mask = SMPS10_VSEL;
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
- PALMAS_SMPS10_STATUS);
+ PALMAS_SMPS10_CTRL);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
pmic->desc[id].min_uV = 3750000;
pmic->desc[id].uV_step = 1250000;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index c6d77e20622..be6e1212562 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -6,6 +6,7 @@ config REMOTEPROC
depends on HAS_DMA
select FW_LOADER
select VIRTIO
+ select VIRTUALIZATION
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index f6e0ea6ffda..69a21938758 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -4,5 +4,6 @@ menu "Rpmsg drivers"
config RPMSG
tristate
select VIRTIO
+ select VIRTUALIZATION
endmenu
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 79fbe3832df..9e95473180e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,7 +20,6 @@ if RTC_CLASS
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
default y
- depends on !ALWAYS_USE_PERSISTENT_CLOCK
help
If you say yes here, the system time (wall clock) will be set using
the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
config RTC_SYSTOHC
bool "Set the RTC time based on NTP synchronization"
default y
- depends on !ALWAYS_USE_PERSISTENT_CLOCK
help
If you say yes here, the system time (wall clock) will be stored
in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 434ebc3a99d..eebd8ac78e4 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -297,7 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
"at91_rtc", pdev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
- return ret;
+ goto err_unmap;
}
/* cpu init code should really have flagged this device as
@@ -309,13 +309,20 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- free_irq(irq, pdev);
- return PTR_ERR(rtc);
+ ret = PTR_ERR(rtc);
+ goto err_free_irq;
}
platform_set_drvdata(pdev, rtc);
dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
+
+err_free_irq:
+ free_irq(irq, pdev);
+err_unmap:
+ iounmap(at91_rtc_regs);
+
+ return ret;
}
/*
@@ -332,6 +339,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
free_irq(irq, pdev);
rtc_device_unregister(rtc);
+ iounmap(at91_rtc_regs);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index af97c94e8a3..cc5bea9c4b1 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -804,9 +804,8 @@ static int cmos_suspend(struct device *dev)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
+ hpet_mask_rtc_irq_bit(mask);
- /* shut down hpet emulation - we don't need it for alarm */
- hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
@@ -870,6 +869,7 @@ static int cmos_resume(struct device *dev)
rtc_update_irq(cmos->rtc, 1, mask);
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
+ hpet_rtc_timer_init();
} while (mask & RTC_AIE);
spin_unlock_irq(&rtc_lock);
}
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 02b742afa76..6dd6b38b0b4 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -265,6 +265,7 @@ static int pcf2123_probe(struct spi_device *spi)
if (!(rxbuf[0] & 0x20)) {
dev_err(&spi->dev, "chip not found\n");
+ ret = -ENODEV;
goto kfree_exit;
}
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index cd798386b62..178836ec252 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -561,6 +561,8 @@ static void __init sclp_add_standby_memory(void)
add_memory_merged(0);
}
+#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
+
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
@@ -573,7 +575,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
new_incr->rn = rn;
new_incr->standby = standby;
if (!standby)
- new_incr->usecount = 1;
+ new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2197b57fb22..7e64546bd98 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (!ioa_cfg->in_reset_reload) {
+ if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
ENTER;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
- ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+ ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
ioa_cfg->in_ioa_bringdown = 1;
for (i = 0; i < ioa_cfg->hrrq_num; i++) {
spin_lock(&ioa_cfg->hrrq[i]._lock);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7992635d405..82910cc69ba 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
char *buffer_data;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ static const char temp[] = "temporary ";
int len;
if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
* it's not worth the risk */
return -EINVAL;
+ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+ buf += sizeof(temp) - 1;
+ sdkp->cache_override = 1;
+ } else {
+ sdkp->cache_override = 0;
+ }
+
for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
len = strlen(sd_cache_types[i]);
if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
return -EINVAL;
rcd = ct & 0x01 ? 1 : 0;
wce = ct & 0x02 ? 1 : 0;
+
+ if (sdkp->cache_override) {
+ sdkp->WCE = wce;
+ sdkp->RCD = rcd;
+ return count;
+ }
+
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
SD_MAX_RETRIES, &data, NULL))
return -EINVAL;
@@ -2319,6 +2334,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
+
+ if (sdkp->cache_override)
+ return;
+
first_len = 4;
if (sdp->skip_ms_page_8) {
if (sdp->type == TYPE_RBC)
@@ -2812,6 +2831,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sdkp->capacity = 0;
sdkp->media_present = 1;
sdkp->write_prot = 0;
+ sdkp->cache_override = 0;
sdkp->WCE = 0;
sdkp->RCD = 0;
sdkp->ATO = 0;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 74a1e4ca540..2386aeb41fe 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -73,6 +73,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
unsigned ATO : 1; /* state of disk ATO bit */
+ unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
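Two details of this sd change deserve a note. The new sysfs prefix is used as, for example, echo "temporary write back" > /sys/class/scsi_disk/<h:c:t:l>/cache_type, which updates the kernel's WCE/RCD view without sending a MODE SELECT to the device. And the prefix length is computed as sizeof(temp) - 1, which only equals the string length because temp is declared as an array above; with a pointer it would be the pointer size. A self-contained demonstration of that distinction:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		static const char arr[] = "temporary ";	/* as in sd_store_cache_type() */
		const char *ptr = "temporary ";

		/* sizeof(arr) counts the string's bytes plus its NUL;
		 * sizeof(ptr) is merely the size of a pointer.
		 */
		printf("%zu vs %zu\n", sizeof(arr) - 1, sizeof(ptr) - 1);	/* 10 vs 7 on LP64 */

		/* The prefix test used above: */
		printf("%d\n", strncmp("temporary write back", arr,
				       sizeof(arr) - 1) == 0);			/* 1 */
		return 0;
	}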
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 366f259e375..6efe4e1b499 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -25,8 +25,8 @@
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of_device.h>
-#include <asm/mach/irq.h>
#include "imx-ipu-v3.h"
#include "ipu-prv.h"
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index bc5e9da4758..a94e66f52b3 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->apdev->name);
}
- kfree(pDevice->apdev);
+ free_netdev(pDevice->apdev);
pDevice->apdev = NULL;
pDevice->bEnable8021x = false;
pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 69971f35e49..60b50d00715 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1348,9 +1348,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
return rc;
}
+ spin_lock_irq(&pDevice->lock);
+
if (wrq->disabled) {
pDevice->ePSMode = WMAC_POWER_CAM;
PSvDisablePowerSaving(pDevice);
+ spin_unlock_irq(&pDevice->lock);
return rc;
}
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
@@ -1361,6 +1364,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
pDevice->ePSMode = WMAC_POWER_FAST;
PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
}
+
+ spin_unlock_irq(&pDevice->lock);
+
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index 90845656579..7fab032298f 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -1,5 +1,5 @@
config ZSMALLOC
- tristate "Memory allocator for compressed pages"
+ bool "Memory allocator for compressed pages"
default n
help
zsmalloc is a slab-based memory allocator designed to store
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index e78d262c524..324e123335d 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -656,11 +656,8 @@ static inline void __zs_unmap_object(struct mapping_area *area,
struct page *pages[2], int off, int size)
{
unsigned long addr = (unsigned long)area->vm_addr;
- unsigned long end = addr + (PAGE_SIZE * 2);
- flush_cache_vunmap(addr, end);
- unmap_kernel_range_noflush(addr, PAGE_SIZE * 2);
- flush_tlb_kernel_range(addr, end);
+ unmap_kernel_range(addr, PAGE_SIZE * 2);
}
#else /* USE_PGTABLE_MAPPING */
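The three removed calls are the flush/unmap/TLB-flush sequence that the consolidated helper performs internally; from memory, mm/vmalloc.c of this era implements unmap_kernel_range() essentially as sketched below (a paraphrase, not a verbatim quote), so the hunk is a pure simplification:

	void unmap_kernel_range(unsigned long addr, unsigned long size)
	{
		unsigned long end = addr + size;

		flush_cache_vunmap(addr, end);		/* pre-unmap cache flush */
		vunmap_page_range(addr, end);		/* tear down the PTEs    */
		flush_tlb_kernel_range(addr, end);	/* post-unmap TLB flush  */
	}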
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 0b52a237130..805f3d26c4e 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -819,7 +819,7 @@ static int iscsit_attach_ooo_cmdsn(
/*
* CmdSN is greater than the tail of the list.
*/
- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
list_add_tail(&ooo_cmdsn->ooo_list,
&sess->sess_ooo_cmdsn_list);
else {
@@ -829,11 +829,12 @@ static int iscsit_attach_ooo_cmdsn(
*/
list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
ooo_list) {
- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
continue;
+ /* Insert before this entry */
list_add(&ooo_cmdsn->ooo_list,
- &ooo_tmp->ooo_list);
+ ooo_tmp->ooo_list.prev);
break;
}
}
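CmdSN is a 32-bit counter that wraps, so a plain < misorders entries once the counter rolls over; iscsi_sna_lt() compares in serial-number arithmetic (cf. RFC 1982) instead. One common formulation of that comparison, not necessarily the kernel's exact implementation, together with a wraparound case:

	#include <stdint.h>
	#include <stdio.h>

	/* "a precedes b" in a 32-bit serial-number space. */
	static int sna32_lt(uint32_t a, uint32_t b)
	{
		return a != b && (int32_t)(a - b) < 0;
	}

	int main(void)
	{
		/* After a wrap, 0xFFFFFFF0 precedes 0x10, even though
		 * plain unsigned comparison says the opposite:
		 */
		printf("%d\n", sna32_lt(0xFFFFFFF0u, 0x10u));	/* 1 */
		printf("%d\n", 0xFFFFFFF0u < 0x10u);		/* 0 */
		return 0;
	}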
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ca2be406f14..93ae9103510 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -712,9 +712,9 @@ static int iscsi_add_notunderstood_response(
}
INIT_LIST_HEAD(&extra_response->er_list);
- strncpy(extra_response->key, key, strlen(key) + 1);
- strncpy(extra_response->value, NOTUNDERSTOOD,
- strlen(NOTUNDERSTOOD) + 1);
+ strlcpy(extra_response->key, key, sizeof(extra_response->key));
+ strlcpy(extra_response->value, NOTUNDERSTOOD,
+ sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
&param_list->extra_response_list);
@@ -1583,8 +1583,6 @@ int iscsi_decode_text_input(
if (phase & PHASE_SECURITY) {
if (iscsi_check_for_auth_key(key) > 0) {
- char *tmpptr = key + strlen(key);
- *tmpptr = '=';
kfree(tmpbuf);
return 1;
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 1e1b7504a76..2c536a0c29e 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,8 +1,10 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <scsi/iscsi_proto.h>
+
struct iscsi_extra_response {
- char key[64];
+ char key[KEY_MAXLEN];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 17a6acbc3ab..12191d825d7 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -148,13 +148,9 @@ static int fd_configure_device(struct se_device *dev)
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size;
- dev->dev_attrib.hw_block_size =
- bdev_logical_block_size(inode->i_bdev);
- dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
@@ -174,12 +170,11 @@ static int fd_configure_device(struct se_device *dev)
goto fail;
}
- dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ fd_dev->fd_block_size = FD_BLOCKSIZE;
}
- fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
-
+ dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -589,11 +584,12 @@ static sector_t fd_get_blocks(struct se_device *dev)
* to handle underlying block_device resize operations.
*/
if (S_ISBLK(i->i_mode))
- dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+ dev_size = i_size_read(i);
else
dev_size = fd_dev->fd_dev_size;
- return div_u64(dev_size, dev->dev_attrib.block_size);
+ return div_u64(dev_size - dev->dev_attrib.block_size,
+ dev->dev_attrib.block_size);
}
static struct sbc_ops fd_sbc_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8bcc514ec8b..e1af9d54112 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -679,6 +679,8 @@ iblock_execute_rw(struct se_cmd *cmd)
rw = WRITE_FUA;
else if (!(q->flush_flags & REQ_FLUSH))
rw = WRITE_FUA;
+ else
+ rw = WRITE;
} else {
rw = WRITE;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3243ea790ea..fc9a5a07b69 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -222,6 +222,7 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
@@ -2213,21 +2214,19 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
- unsigned long flags;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
complete(&se_cmd->cmd_wait_comp);
return;
}
list_del(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ spin_unlock(&se_sess->sess_cmd_lock);
se_cmd->se_tfo->release_cmd(se_cmd);
}
@@ -2238,7 +2237,8 @@ static void target_release_cmd_kref(struct kref *kref)
*/
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
- return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+ return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+ &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);
@@ -2253,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- WARN_ON(se_sess->sess_tearing_down);
+ if (se_sess->sess_tearing_down) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return;
+ }
se_sess->sess_tearing_down = 1;
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
se_cmd->cmd_wait_set = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2274,9 +2277,10 @@ void target_wait_for_sess_cmds(
{
struct se_cmd *se_cmd, *tmp_cmd;
bool rc = false;
+ unsigned long flags;
list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_cmd_list, se_cmd_list) {
+ &se_sess->sess_wait_list, se_cmd_list) {
list_del(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
@@ -2304,6 +2308,11 @@ void target_wait_for_sess_cmds(
se_cmd->se_tfo->release_cmd(se_cmd);
}
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
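This hunk reads correctly only together with the helper's contract: kref_put_spinlock_irqsave() takes the given lock (IRQ-saving) around the final decrement and invokes the release callback with the lock still held, and the callback must drop it. That is why target_release_cmd_kref() above unlocks a sess_cmd_lock it never visibly acquires, and why the final put can no longer race with list manipulation under the same lock. A simplified sketch of that contract (the real helper also has a lock-free fast path for non-final puts; kref->refcount is the 3.x-era atomic_t):

	static inline int kref_put_under_lock(struct kref *kref,
					      void (*release)(struct kref *),
					      spinlock_t *lock)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		if (atomic_dec_and_test(&kref->refcount)) {
			release(kref);		/* runs locked; drops *lock */
			local_irq_restore(flags);
			return 1;
		}
		spin_unlock_irqrestore(lock, flags);
		return 0;
	}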
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 05e72bea9b0..1f8cba6dfa2 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1588,6 +1588,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
ldata->real_raw = 0;
}
n_tty_set_room(tty);
+ /*
+ * Fix the tty hang that occurs when I_IXON(tty) is cleared while
+ * the tty had previously been stopped by STOP_CHAR(tty).
+ */
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+ start_tty(tty);
+ }
+
/* The termios change makes the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index c24b4db243b..125e0fd0674 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -682,6 +682,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
+ /* We refuse fsnotify events on ptmx, since it's a shared resource */
+ filp->f_mode |= FMODE_NONOTIFY;
+
retval = tty_alloc_file(filp);
if (retval)
return retval;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 381d815b990..88a5ee744bd 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1944,6 +1944,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mutex_unlock(&port->mutex);
return 0;
}
+ put_device(tty_dev);
+
if (console_suspend_enabled || !uart_console(uport))
uport->suspended = 1;
@@ -2009,9 +2011,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
disable_irq_wake(uport->irq);
uport->irq_wake = 0;
}
+ put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
+ put_device(tty_dev);
uport->suspended = 0;
/*
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index b0452688308..a9cd0b9353d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -941,10 +941,10 @@ void start_tty(struct tty_struct *tty)
EXPORT_SYMBOL(start_tty);
+/* We limit tty time update visibility to every 8 seconds or so. */
static void tty_update_time(struct timespec *time)
{
- unsigned long sec = get_seconds();
- sec -= sec % 60;
+ unsigned long sec = get_seconds() & ~7;
if ((long)(sec - time->tv_sec) > 0)
time->tv_sec = sec;
}
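Masking with ~7 clears the low three bits of the seconds value, i.e. rounds it down to a multiple of 8, so the inode time advances at most once per 8-second window; the removed sec % 60 arithmetic did the same at one-minute granularity. A worked instance:

	unsigned long sec = 1000007UL;	/* 8 * 125000 + 7 */
	sec &= ~7UL;			/* -> 1000000, the window start */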
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index b7eb86ad6bf..8a7eb77233b 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
{
int ret, len;
__le32 *buf;
- int offb, offd;
+ int offb;
+ unsigned int offd;
const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 608a2aeb400..b2df442eb3e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
config USB_CHIPIDEA_HOST
bool "ChipIdea host controller"
depends on USB=y || USB=USB_CHIPIDEA
- depends on USB_EHCI_HCD
+ depends on USB_EHCI_HCD=y
select USB_EHCI_ROOT_HUB_TT
help
Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index f64fbea1cf2..d86333b8c43 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -461,6 +461,8 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
mReq->ptr->page[i] =
(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
+ wmb();
+
if (!list_empty(&mEp->qh.queue)) {
struct ci13xxx_req *mReqPrev;
int n = hw_ep_bit(mEp->num, mEp->dir);
@@ -561,6 +563,12 @@ __acquires(mEp->lock)
struct ci13xxx_req *mReq = \
list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue);
+
+ if (mReq->zptr) {
+ dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+ mReq->zptr = NULL;
+ }
+
list_del_init(&mReq->queue);
mReq->req.status = -ESHUTDOWN;
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 4ff2384d7ca..d12e8b59b11 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -40,7 +40,7 @@ struct ci13xxx_td {
#define TD_CURR_OFFSET (0x0FFFUL << 0)
#define TD_FRAME_NUM (0x07FFUL << 0)
#define TD_RESERVED_MASK (0x0FFFUL << 0)
-} __attribute__ ((packed));
+} __attribute__ ((packed, aligned(4)));
/* DMA layout of queue heads */
struct ci13xxx_qh {
@@ -57,7 +57,7 @@ struct ci13xxx_qh {
/* 9 */
u32 RESERVED;
struct usb_ctrlrequest setup;
-} __attribute__ ((packed));
+} __attribute__ ((packed, aligned(4)));
/**
* struct ci13xxx_req - usb request representation
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 8823e98989f..caefc800f29 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -739,6 +739,8 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
index &= 0xff;
switch (requesttype & USB_RECIP_MASK) {
case USB_RECIP_ENDPOINT:
+ if ((index & ~USB_DIR_IN) == 0)
+ return 0;
ret = findintfep(ps->dev, index);
if (ret >= 0)
ret = checkintf(ps, ret);
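For an endpoint-recipient control request, wIndex carries the endpoint address and USB_DIR_IN (0x80) is its direction bit, so (index & ~USB_DIR_IN) == 0 matches exactly 0x00 and 0x80: the two halves of the default control endpoint, which every device is guaranteed to have, so no interface lookup is required. A quick check of the predicate:

	#include <stdio.h>

	#define USB_DIR_IN 0x80	/* as in ch9.h */

	int main(void)
	{
		unsigned char idx[] = { 0x00, 0x80, 0x81 };
		for (int i = 0; i < 3; i++)
			printf("0x%02x -> %s\n", idx[i],
			       (idx[i] & ~USB_DIR_IN) == 0 ?
			       "ep0: allow" : "look up the interface");
		return 0;
	}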
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3113c1d7144..e14346a4e64 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Edirol SD-20 */
{ USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index e8d77689a32..1e04b791ef1 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *glue = pci_get_drvdata(pci);
+ platform_device_unregister(glue->dwc3);
platform_device_unregister(glue->usb2_phy);
platform_device_unregister(glue->usb3_phy);
- platform_device_unregister(glue->dwc3);
pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 82e160e96fc..bab96305493 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1637,10 +1637,20 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
dep = dwc->eps[epnum];
- dwc3_free_trb_pool(dep);
- if (epnum != 0 && epnum != 1)
+ /*
+ * Physical endpoints 0 and 1 are special; they form the
+ * bi-directional USB endpoint 0.
+ *
+ * For those two physical endpoints, we don't allocate a TRB
+ * pool, nor do we add them to the endpoints list. Because of
+ * that, we must skip those two operations here as well, or we
+ * would end up with all sorts of bugs when removing dwc3.ko.
+ */
+ if (epnum != 0 && epnum != 1) {
+ dwc3_free_trb_pool(dep);
list_del(&dep->endpoint.ep_list);
+ }
kfree(dep);
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 416a6dce5e1..83b5a172592 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -670,9 +670,6 @@ int ehci_setup(struct usb_hcd *hcd)
if (retval)
return retval;
- if (ehci_is_TDI(ehci))
- tdi_reset(ehci);
-
ehci_reset(ehci);
return 0;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 010f686d888..5a00ef3a7bb 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
}
static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 180a2b01db5..007137fe14d 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -233,14 +233,14 @@ static int ohci_urb_enqueue (
urb->start_frame = frame;
}
} else if (ed->type == PIPE_ISOCHRONOUS) {
- u16 next = ohci_frame_no(ohci) + 2;
+ u16 next = ohci_frame_no(ohci) + 1;
u16 frame = ed->last_iso + ed->interval;
/* Behind the scheduling threshold? */
if (unlikely(tick_before(frame, next))) {
/* USB_ISO_ASAP: Round up to the first available slot */
- if (urb->transfer_flags & URB_ISO_ASAP)
+ if (urb->transfer_flags & URB_ISO_ASAP) {
frame += (next - frame + ed->interval - 1) &
-ed->interval;
@@ -248,21 +248,25 @@ static int ohci_urb_enqueue (
* Not ASAP: Use the next slot in the stream. If
* the entire URB falls before the threshold, fail.
*/
- else if (tick_before(frame + ed->interval *
+ } else {
+ if (tick_before(frame + ed->interval *
(urb->number_of_packets - 1), next)) {
- retval = -EXDEV;
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- goto fail;
- }
+ retval = -EXDEV;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ goto fail;
+ }
- /*
- * Some OHCI hardware doesn't handle late TDs
- * correctly. After retiring them it proceeds to
- * the next ED instead of the next TD. Therefore
- * we have to omit the late TDs entirely.
- */
- urb_priv->td_cnt = DIV_ROUND_UP(next - frame,
- ed->interval);
+ /*
+ * Some OHCI hardware doesn't handle late TDs
+ * correctly. After retiring them it proceeds
+ * to the next ED instead of the next TD.
+ * Therefore we have to omit the late TDs
+ * entirely.
+ */
+ urb_priv->td_cnt = DIV_ROUND_UP(
+ (u16) (next - frame),
+ ed->interval);
+ }
}
urb->start_frame = frame;
}
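The URB_ISO_ASAP arm relies on the interval being a power of two: (next - frame + ed->interval - 1) & -ed->interval rounds the distance up to the next multiple of the interval, since -interval in two's complement is the mask with the low log2(interval) bits clear. Worked numbers (hypothetical frame counts):

	unsigned frame = 100, next = 105, interval = 8;

	/* next - frame = 5;  5 + 7 = 12;  12 & -8 = 8 */
	frame += (next - frame + interval - 1) & -interval;

	/* frame == 108: the first slot at or after 'next' whose offset
	 * from the old frame is a multiple of the interval, preserving
	 * the stream's phase.
	 */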
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index f87bee6d278..9189bc984c9 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
/* auto-stop if nothing connected for 1 second */
if (any_ports_active(uhci))
uhci->rh_state = UHCI_RH_RUNNING;
- else if (time_after_eq(jiffies, uhci->auto_stop_time))
+ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+ !uhci->wait_for_hp)
suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
break;
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index f0976d8190b..041c6ddb695 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
return -EINVAL; /* Can't change the period */
} else {
- next = uhci->frame_number + 2;
+ next = uhci->frame_number + 1;
/* Find the next unused frame */
if (list_empty(&qh->queue)) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6dc238c592b..905f38c31a4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
/* Set the max packet size and max burst */
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
- max_packet = usb_endpoint_maxp(&ep->desc);
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
- max_packet = ep->ss_ep_comp.bMaxBurst;
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
+ max_burst = ep->ss_ep_comp.bMaxBurst;
break;
case USB_SPEED_HIGH:
+ /* Some devices get this wrong */
+ if (usb_endpoint_xfer_bulk(&ep->desc))
+ max_packet = 512;
/* bits 11:12 specify the number of additional transaction
* opportunities per microframe (USB 2.0, section 9.6.6)
*/
@@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
usb_endpoint_xfer_int(&ep->desc)) {
max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
}
- /* Fall through */
+ break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
break;
default:
BUG();
}
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
+ MAX_BURST(max_burst));
max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
@@ -1826,6 +1827,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
spin_unlock_irqrestore(&xhci->lock, flags);
+ if (!xhci->rh_bw)
+ goto no_bw;
+
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
@@ -1844,6 +1848,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
+no_bw:
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
@@ -2255,6 +2260,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
u32 page_size, temp;
int i;
+ INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
for (i = 0; i < 16; i++) {
@@ -2333,7 +2341,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- INIT_LIST_HEAD(&xhci->cancel_cmd_list);
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -2444,8 +2451,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
-
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
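For high-speed periodic endpoints, wMaxPacketSize packs two values: bits 10:0 hold the packet size and bits 12:11 the number of additional transaction opportunities per microframe, which the rework above feeds into MAX_BURST alongside MAX_PACKET. A worked decode (descriptor value chosen for illustration):

	u16 maxp  = 0x1400;			/* HS interrupt endpoint       */
	u16 size  = maxp & 0x7ff;		/* 0x400 = 1024-byte packets   */
	u16 burst = (maxp & 0x1800) >> 11;	/* 2 extra => 3 per microframe */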
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1a30c380043..cc24e39b97d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -221,6 +221,14 @@ static void xhci_pci_remove(struct pci_dev *dev)
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ /*
+ * Systems with the TI redriver that loses port status change events
+ * need to have the registers polled during D3, so avoid D3cold.
+ */
+ if (xhci_compliance_mode_recovery_timer_quirk_check())
+ pdev->no_d3cold = true;
return xhci_suspend(xhci);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 53b8f89a0b1..82b08091be6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
* Systems:
* Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
*/
-static bool compliance_mode_recovery_timer_quirk_check(void)
+bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
const char *dmi_product_name, *dmi_sys_vendor;
@@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd)
xhci_dbg(xhci, "Finished xhci_init\n");
/* Initializing Compliance Mode Recovery Data If Needed */
- if (compliance_mode_recovery_timer_quirk_check()) {
+ if (xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
}
@@ -952,6 +952,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
+ bool comp_timer_running = false;
/* Wait a bit if either of the roothubs need to settle from the
* transition into bus suspend.
@@ -989,6 +990,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
+
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ !(xhci_all_ports_seen_u0(xhci))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+ xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+ }
+
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
@@ -1031,6 +1039,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
retval = xhci_init(hcd->primary_hcd);
if (retval)
return retval;
+ comp_timer_running = true;
+
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
if (!retval) {
@@ -1072,7 +1082,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
* to suffer the Compliance Mode issue again. It doesn't matter if
* ports have entered previously to U0 before system's suspension.
*/
- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
/* Re-enable port polling. */
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 63582719e0f..bde9f22ccd8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1853,4 +1853,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+/* xHCI quirks */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void);
+
#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 0fc6e5fc745..ba6a5d6e618 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
+ { APPLEDISPLAY_DEVICE(0x9236) },
/* Terminating entry */
{ }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 1ce1fcf3f3e..6f26fd8a4cc 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1232,7 +1232,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
- static bool use_sg;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1463,9 +1462,9 @@ done:
* NULL.
*/
if (!urb->transfer_buffer)
- use_sg = true;
+ qh->use_sg = true;
- if (use_sg) {
+ if (qh->use_sg) {
/* sg_miter_start is already done in musb_ep_program */
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
@@ -1484,9 +1483,9 @@ done:
qh->segsize = length;
- if (use_sg) {
+ if (qh->use_sg) {
if (offset + length >= urb->transfer_buffer_length)
- use_sg = false;
+ qh->use_sg = false;
}
musb_ep_select(mbase, epnum);
@@ -1552,7 +1551,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
bool done = false;
u32 status;
struct dma_channel *dma;
- static bool use_sg;
unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
@@ -1878,12 +1876,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
* NULL.
*/
if (!urb->transfer_buffer) {
- use_sg = true;
+ qh->use_sg = true;
sg_miter_start(&qh->sg_miter, urb->sg, 1,
sg_flags);
}
- if (use_sg) {
+ if (qh->use_sg) {
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
sg_miter_stop(&qh->sg_miter);
@@ -1913,8 +1911,8 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
- if (use_sg)
- use_sg = false;
+ if (qh->use_sg)
+ qh->use_sg = false;
if (urb->status == -EINPROGRESS)
urb->status = status;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 5a9c8feec10..738f7eb60df 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -74,6 +74,7 @@ struct musb_qh {
u16 frame; /* for periodic schedule */
unsigned iso_idx; /* in urb->iso_frame_desc[] */
struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */
+ bool use_sg; /* to track urb using sglist */
};
/* map from control or bulk queue head to the first qh on that ring */
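The removed static bool use_sg was function-scoped, i.e. a single flag shared by every endpoint of every controller instance, so concurrent transfers could flip each other's scatter-gather state; keeping the flag in struct musb_qh gives each queue head its own copy. The pitfall in miniature:

	#include <stdio.h>

	static int next_val(void)
	{
		static int counter;	/* one instance shared by all callers */
		return ++counter;
	}

	int main(void)
	{
		int a = next_val();
		int b = next_val();
		printf("%d %d\n", a, b);	/* 1 2: state persists across calls */
		return 0;
	}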
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 4775f8209e5..513d45f9438 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -43,7 +43,7 @@
#define DRIVER_NAME "ark3116"
/* usb timeout of 1 second */
-#define ARK_TIMEOUT (1*HZ)
+#define ARK_TIMEOUT 1000
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
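The timeout passed to the USB message helpers is in milliseconds, not jiffies, so 1*HZ was a one-second timeout only when HZ happened to be 1000 (with HZ=100 it meant 100 ms). For reference, the prototype whose final parameter this feeds:

	/* include/linux/usb.h: the last parameter is in milliseconds */
	int usb_control_msg(struct usb_device *dev, unsigned int pipe,
			    __u8 request, __u8 requesttype,
			    __u16 value, __u16 index,
			    void *data, __u16 size, int timeout);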
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index ba7352e4187..9b145d335c6 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -65,6 +65,7 @@ static const struct usb_device_id id_table_earthmate[] = {
static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ } /* Terminating entry */
};
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
@@ -230,6 +232,12 @@ static struct usb_serial_driver * const serial_drivers[] = {
* Cypress serial helper functions
*****************************************************************************/
+/* FRWD Dongle hidcom needs to skip reset and speed checks */
+static inline bool is_frwd(struct usb_device *dev)
+{
+ return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
+ (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
+}
static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
{
@@ -239,6 +247,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
if (unstable_bauds)
return new_rate;
+ /* FRWD Dongle uses 115200 bps */
+ if (is_frwd(port->serial->dev))
+ return new_rate;
+
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -449,7 +461,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
return -ENOMEM;
}
- usb_reset_configuration(serial->dev);
+ /* Skip reset for FRWD device. It is a workaound:
+ device hangs if it receives SET_CONFIGURE in Configured
+ state. */
+ if (!is_frwd(serial->dev))
+ usb_reset_configuration(serial->dev);
priv->cmd_ctrl = 0;
priv->line_control = 0;
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 67cf6082688..b461311a2ae 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -24,6 +24,10 @@
#define VENDOR_ID_CYPRESS 0x04b4
#define PRODUCT_ID_CYPHIDCOM 0x5500
+/* FRWD Dongle - a GPS sports watch */
+#define VENDOR_ID_FRWD 0x6737
+#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
+
/* Powercom UPS, chip CY7C63723 */
#define VENDOR_ID_POWERCOM 0x0d9f
#define PRODUCT_ID_UPS 0x0002
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9886180e45f..e8e28379c8b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -189,7 +189,10 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
{ USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -870,7 +873,9 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID),
.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
{ USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
@@ -926,8 +931,8 @@ static int ftdi_get_icount(struct tty_struct *tty,
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static bool ftdi_tx_empty(struct usb_serial_port *port);
+static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2]);
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
@@ -963,7 +968,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
- .chars_in_buffer = ftdi_chars_in_buffer,
+ .tx_empty = ftdi_tx_empty,
};
static struct usb_serial_driver * const serial_drivers[] = {
@@ -1792,20 +1797,24 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
}
/*
- * First and second port on STMCLiteadaptors is reserved for JTAG interface
- * and the forth port for pio
+ * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's
+ * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and
+ * can be accessed from userspace.
+ * The next two ports are enabled as UARTs by default, where port 2 is
+ * a conventional RS-232 UART.
*/
static int ftdi_stmclite_probe(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
struct usb_interface *interface = serial->interface;
- if (interface == udev->actconfig->interface[2])
- return 0;
-
- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+ if (interface == udev->actconfig->interface[0] ||
+ interface == udev->actconfig->interface[1]) {
+ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+ return -ENODEV;
+ }
- return -ENODEV;
+ return 0;
}
/*
@@ -2083,27 +2092,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
}
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
+static bool ftdi_tx_empty(struct usb_serial_port *port)
{
- struct usb_serial_port *port = tty->driver_data;
- int chars;
unsigned char buf[2];
int ret;
- chars = usb_serial_generic_chars_in_buffer(tty);
- if (chars)
- goto out;
-
- /* Check if hardware buffer is empty. */
- ret = ftdi_get_modem_status(tty, buf);
+ ret = ftdi_get_modem_status(port, buf);
if (ret == 2) {
if (!(buf[1] & FTDI_RS_TEMT))
- chars = 1;
+ return false;
}
-out:
- dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
- return chars;
+ return true;
}
/* old_termios contains the original termios settings and tty->termios contains
@@ -2295,10 +2295,9 @@ no_c_cflag_changes:
* Returns the number of status bytes retrieved (device dependent), or
* negative error code.
*/
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2])
{
- struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned char *buf;
int len;
@@ -2363,7 +2362,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
unsigned char buf[2];
int ret;
- ret = ftdi_get_modem_status(tty, buf);
+ ret = ftdi_get_modem_status(port, buf);
if (ret < 0)
return ret;
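The ftdi_sio conversion above replaces the chars_in_buffer() heuristic with a dedicated tx_empty() callback: the second FTDI modem-status byte carries a transmitter-empty flag (FTDI_RS_TEMT), and the port is only considered drained once that bit is set. A compilable sketch of the test; get_status() stands in for ftdi_get_modem_status(), which returns the number of status bytes read:

#include <stdbool.h>
#include <stdio.h>

#define FTDI_RS_TEMT (1 << 6)	/* transmitter empty, as in ftdi_sio.h */

static int get_status(unsigned char status[2])
{
	status[0] = 0x00;
	status[1] = FTDI_RS_TEMT;	/* pretend the FIFO has drained */
	return 2;			/* two status bytes retrieved */
}

static bool tx_empty(void)
{
	unsigned char buf[2];

	if (get_status(buf) == 2 && !(buf[1] & FTDI_RS_TEMT))
		return false;	/* hardware still shifting bits out */
	return true;
}

int main(void)
{
	printf("tx empty: %s\n", tx_empty() ? "yes" : "no");
	return 0;
}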
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e79861eeed4..6dd79253205 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -74,6 +74,7 @@
#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
#define FTDI_OPENDCC_GBM_PID 0xBFDC
+#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD
/* NZR SEM 16+ USB (http://www.nzr.de) */
#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
@@ -771,6 +772,8 @@
*/
#define NEWPORT_VID 0x104D
#define NEWPORT_AGILIS_PID 0x3000
+#define NEWPORT_CONEX_CC_PID 0x3002
+#define NEWPORT_CONEX_AGP_PID 0x3006
/* Interbiometrics USB I/O Board */
/* Developed for Interbiometrics by Rudolf Gugler */
@@ -1150,7 +1153,8 @@
* STMicroelectronics
*/
#define ST_VID 0x0483
-#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
+#define ST_STMCLT_2232_PID 0x3746
+#define ST_STMCLT_4232_PID 0x3747
/*
* Papouch products (http://www.papouch.com/)
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4c5c23f1cae..7f4342b0087 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -264,6 +264,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
}
EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ unsigned int bps;
+ unsigned long period;
+ unsigned long expire;
+
+ bps = tty_get_baud_rate(tty);
+ if (!bps)
+ bps = 9600; /* B0 */
+ /*
+ * Use a poll-period of roughly the time it takes to send one
+ * character or at least one jiffy.
+ */
+ period = max_t(unsigned long, (10 * HZ / bps), 1);
+ period = min_t(unsigned long, period, timeout);
+
+ dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+ __func__, jiffies_to_msecs(timeout),
+ jiffies_to_msecs(period));
+ expire = jiffies + timeout;
+ while (!port->serial->type->tx_empty(port)) {
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
+
static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
int index, gfp_t mem_flags)
{
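usb_serial_generic_wait_until_sent() above polls tx_empty() roughly once per character time: an asynchronous character is about 10 bit times (start + 8 data + stop), so one character takes 10/bps seconds, i.e. 10*HZ/bps jiffies, clamped to at least one jiffy and at most the caller's timeout. A worked example of that arithmetic (HZ=100 is an assumed config value):

#include <stdio.h>

#define HZ 100	/* assumed kernel tick rate */

static unsigned long poll_period(unsigned int bps)
{
	unsigned long period = 10UL * HZ / bps;	/* one character time */

	return period ? period : 1;	/* at least one jiffy */
}

int main(void)
{
	unsigned int rates[] = { 300, 9600, 115200 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%6u bps -> poll every %lu jiffies\n",
		       rates[i], poll_period(rates[i]));
	return 0;
}

At high baud rates the computed period underflows to zero and clamps to a single jiffy, so fast ports are polled every tick while slow ports are polled no more often than one character time.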
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 7777172206d..f26decda9f2 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2033,8 +2033,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
- int ret;
-
if (edge_port == NULL)
return 0;
@@ -2042,16 +2040,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
- if (!chars) {
- ret = tx_active(edge_port);
- if (ret > 0)
- chars = ret;
- }
-
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
+static bool edge_tx_empty(struct usb_serial_port *port)
+{
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+ int ret;
+
+ ret = tx_active(edge_port);
+ if (ret > 0)
+ return false;
+
+ return true;
+}
+
static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -2392,7 +2396,7 @@ static int get_serial_info(struct edgeport_port *edge_port,
cwait = edge_port->port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = jiffies_to_msecs(closing_wait) / 10;
+ cwait = jiffies_to_msecs(cwait) / 10;
memset(&tmp, 0, sizeof(tmp));
@@ -2622,6 +2626,7 @@ static struct usb_serial_driver edgeport_1port_device = {
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
+ .tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
@@ -2653,6 +2658,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
+ .tx_empty = edge_tx_empty,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index ff77027160a..5687e2652b5 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -289,7 +289,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress), buf,
- count, &actual, HZ * 1);
+ count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
@@ -309,7 +309,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
usb_bulk_msg(serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress), buf,
- count, &actual, HZ * 1);
+ count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 1fd1935c831..34ed26e9d8c 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1594,7 +1594,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
d_details = s_priv->device_details;
device_port = port->number - port->serial->minor;
- outcont_urb = d_details->outcont_endpoints[port->number];
+ outcont_urb = d_details->outcont_endpoints[device_port];
this_urb = p_priv->outcont_urb;
dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
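The keyspan fix above corrects an indexing bug: outcont_endpoints[] has one entry per port on this adapter, so it must be indexed with the device-relative port number (port->number - port->serial->minor), not the system-wide port->number, which would overrun the table on a second adapter. A small sketch of the computation with illustrative values:

#include <stdio.h>

int main(void)
{
	int port_number = 5;	/* global, minor-based port number */
	int serial_minor = 4;	/* first minor of this adapter */
	int device_port = port_number - serial_minor;

	printf("global port %d is device-relative port %d\n",
	       port_number, device_port);
	/* outcont_endpoints[device_port] is in range;
	 * outcont_endpoints[port_number] is not */
	return 0;
}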
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index e0ebec3b5d6..09236891ffe 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -40,7 +40,7 @@
#define DRIVER_DESC "Moschip USB Serial Driver"
/* default urb timeout */
-#define MOS_WDR_TIMEOUT (HZ * 5)
+#define MOS_WDR_TIMEOUT 5000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
@@ -228,11 +228,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
__u8 requesttype = (__u8)0xc0;
__u16 index = get_reg_index(reg);
__u16 value = get_reg_value(reg, serial_portnum);
- int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
- index, data, 1, MOS_WDR_TIMEOUT);
- if (status < 0)
+ u8 *buf;
+ int status;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, buf, 1, MOS_WDR_TIMEOUT);
+ if (status == 1)
+ *data = *buf;
+ else if (status < 0)
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d", status);
+ kfree(buf);
+
return status;
}
@@ -1633,7 +1644,7 @@ static void change_port_settings(struct tty_struct *tty,
mos7720_port->shadowMCR |= (UART_MCR_XONANY);
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
- if (port->number)
+ if (port_number)
write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
else
write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
@@ -1992,7 +2003,7 @@ static int mos7720_startup(struct usb_serial *serial)
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
+ (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
/* start the interrupt urb */
ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
@@ -2035,7 +2046,7 @@ static void mos7720_release(struct usb_serial *serial)
/* wait for synchronous usb calls to return */
if (mos_parport->msg_pending)
wait_for_completion_timeout(&mos_parport->syncmsg_compl,
- MOS_WDR_TIMEOUT);
+ msecs_to_jiffies(MOS_WDR_TIMEOUT));
parport_remove_port(mos_parport->pp);
usb_set_serial_data(serial, NULL);
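The read_mos_reg() rework above, like the mos7840 changes that follow, switches to a heap-allocated bounce buffer: buffers passed to usb_control_msg() are DMA-mapped by the USB core, so they must come from kmalloc(), never from the caller's stack. A user-space model of the pattern; fake_control_msg() stands in for usb_control_msg() and "transfers" one byte:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int fake_control_msg(unsigned char *buf, int len)
{
	memset(buf, 0xA5, len);	/* pretend the device answered 0xA5 */
	return len;		/* bytes transferred */
}

static int read_reg(unsigned char *data)
{
	unsigned char *buf = malloc(1);	/* kmalloc(1, GFP_KERNEL) in the driver */
	int status;

	if (!buf)
		return -1;	/* -ENOMEM in the driver */

	status = fake_control_msg(buf, 1);
	if (status == 1)
		*data = *buf;	/* copy out of the DMA-safe buffer */
	free(buf);
	return status;
}

int main(void)
{
	unsigned char data = 0;

	if (read_reg(&data) == 1)
		printf("register value: 0x%02x\n", data);
	return 0;
}

Note the result is copied back to the caller only when the full 1-byte transfer is reported, the same status check the patch adds.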
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index b8051fa6191..2c14a31d234 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2255,13 +2255,21 @@ static int mos7840_ioctl(struct tty_struct *tty,
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
+ u8 *buf;
__u16 data = 0, mcr_data = 0;
__u16 test_pattern = 0x55AA;
+ int res;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return 0; /* failed to identify 7810 */
/* Store MCR setting */
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
- &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ if (res == VENDOR_READ_LENGTH)
+ mcr_data = *buf;
for (i = 0; i < 16; i++) {
/* Send the 1-bit test pattern out to MCS7810 test pin */
@@ -2271,9 +2279,12 @@ static int mos7810_check(struct usb_serial *serial)
MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
/* Read the test pattern back */
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
- VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+ VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ if (res == VENDOR_READ_LENGTH)
+ data = *buf;
/* If this is a MCS7810 device, both test patterns must match */
if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
@@ -2287,6 +2298,8 @@ static int mos7810_check(struct usb_serial *serial)
MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
0, MOS_WDR_TIMEOUT);
+ kfree(buf);
+
if (pass_count == 16)
return 1;
@@ -2296,11 +2309,17 @@ static int mos7810_check(struct usb_serial *serial)
static int mos7840_calc_num_ports(struct usb_serial *serial)
{
__u16 data = 0x00;
+ u8 *buf;
int mos7840_num_ports;
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
- VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (buf) {
+ usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+ VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+ data = *buf;
+ kfree(buf);
+ }
if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 558adfc0500..9162db20498 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
+#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -249,13 +250,7 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
-#define ZTE_PRODUCT_CDMA_TECH 0xfffe
-#define ZTE_PRODUCT_AC8710 0xfff1
-#define ZTE_PRODUCT_AC2726 0xfff5
-#define ZTE_PRODUCT_AC8710T 0xffff
#define ZTE_PRODUCT_MC2718 0xffe8
-#define ZTE_PRODUCT_AD3812 0xffeb
-#define ZTE_PRODUCT_MC2716 0xffed
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -341,12 +336,13 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
-#define CINTERION_PRODUCT_AH6 0x0055
-#define CINTERION_PRODUCT_PLS8 0x0060
+#define CINTERION_PRODUCT_AHXX 0x0055
+#define CINTERION_PRODUCT_PLXX 0x0060
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+#define OLIVETTI_PRODUCT_OLICARD145 0xc003
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
@@ -493,18 +489,10 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
.reserved = BIT(4),
};
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
static const struct option_blacklist_info zte_mc2718_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
};
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -591,6 +579,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
@@ -770,6 +760,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -794,7 +785,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
@@ -965,6 +955,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
@@ -1194,16 +1186,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1263,8 +1248,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
@@ -1273,6 +1259,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
@@ -1350,6 +1337,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
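The option driver's blacklist entries seen above are per-device interface bitmaps: sendsetup marks interfaces that must not receive the modem setup sequence, and reserved marks interfaces (typically the modem's network function) that the serial driver must not bind at all. A sketch of the lookup, with a structure that mirrors but simplifies option_blacklist_info:

#include <stdio.h>

#define BIT(n) (1UL << (n))

struct blacklist {
	unsigned long sendsetup;
	unsigned long reserved;
};

/* mirrors net_intf4_blacklist above: interface 4 carries the network function */
static const struct blacklist net_intf4_blacklist = {
	.sendsetup = 0,
	.reserved = BIT(4),
};

int main(void)
{
	int ifnum;

	for (ifnum = 0; ifnum < 6; ifnum++)
		printf("interface %d: %s\n", ifnum,
		       (net_intf4_blacklist.reserved & BIT(ifnum)) ?
		       "reserved (skip)" : "bind as serial port");
	return 0;
}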
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 59b32b78212..bd794b43898 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
+ {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
/* non Gobi Qualcomm serial devices */
{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 5d9b178484f..42de53ce5b8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -379,6 +379,22 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
return count;
}
+static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial *serial = port->serial;
+
+ dev_dbg(tty->dev, "%s\n", __func__);
+
+ if (!port->serial->type->wait_until_sent)
+ return;
+
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ port->serial->type->wait_until_sent(tty, timeout);
+ mutex_unlock(&serial->disc_mutex);
+}
+
static void serial_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -1204,6 +1220,7 @@ static const struct tty_operations serial_ops = {
.unthrottle = serial_unthrottle,
.break_ctl = serial_break,
.chars_in_buffer = serial_chars_in_buffer,
+ .wait_until_sent = serial_wait_until_sent,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
.get_icount = serial_get_icount,
@@ -1329,6 +1346,8 @@ static void fixup_generic(struct usb_serial_driver *device)
set_to_generic_if_null(device, close);
set_to_generic_if_null(device, write_room);
set_to_generic_if_null(device, chars_in_buffer);
+ if (device->tx_empty)
+ set_to_generic_if_null(device, wait_until_sent);
set_to_generic_if_null(device, read_bulk_callback);
set_to_generic_if_null(device, write_bulk_callback);
set_to_generic_if_null(device, disconnect);
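serial_wait_until_sent() above takes disc_mutex before calling into the driver so that draining the hardware FIFO cannot race with a USB disconnect; fixup_generic() then wires up the generic implementation only for drivers that provide tx_empty(). A user-space model of the disconnect guard, using a pthread mutex in place of the kernel mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t disc_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool disconnected;	/* mirrors serial->disconnected */

static void driver_wait_until_sent(int timeout)
{
	printf("draining hardware FIFO (timeout %d)\n", timeout);
}

static void serial_wait_until_sent(int timeout)
{
	pthread_mutex_lock(&disc_mutex);
	if (!disconnected)
		driver_wait_until_sent(timeout);
	pthread_mutex_unlock(&disc_mutex);
}

int main(void)
{
	serial_wait_until_sent(100);	/* device present: callback runs */
	disconnected = true;
	serial_wait_until_sent(100);	/* device gone: callback skipped */
	return 0;
}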
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 1129aa73c23..c24ee1712a9 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -566,10 +566,19 @@ static int treo_attach(struct usb_serial *serial)
*/
#define COPY_PORT(dest, src) \
do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \
+ dest->read_urbs[i] = src->read_urbs[i]; \
+ dest->read_urbs[i]->context = dest; \
+ dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
+ } \
dest->read_urb = src->read_urb; \
dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
dest->bulk_in_buffer = src->bulk_in_buffer; \
+ dest->bulk_in_size = src->bulk_in_size; \
dest->interrupt_in_urb = src->interrupt_in_urb; \
+ dest->interrupt_in_urb->context = dest; \
dest->interrupt_in_endpointAddress = \
src->interrupt_in_endpointAddress;\
dest->interrupt_in_buffer = src->interrupt_in_buffer; \
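The COPY_PORT changes above matter because a URB carries a context pointer back to its owning port: when treo_attach() swaps URBs and buffers between two port structures, each copied URB must be re-pointed at its new owner, or completion handlers will operate on the wrong port. A reduced sketch with stand-in types for struct urb and struct usb_serial_port:

#include <stdio.h>

struct port;
struct urb { struct port *context; };
struct port {
	const char *name;
	struct urb *read_urb;
};

static void copy_port(struct port *dest, struct port *src)
{
	dest->read_urb = src->read_urb;
	dest->read_urb->context = dest;	/* the re-pointing the fix adds */
}

int main(void)
{
	struct urb u;
	struct port a = { "src", &u }, b = { "dest", NULL };

	u.context = &a;
	copy_port(&b, &a);
	printf("urb completions now go to: %s\n", b.read_urb->context->name);
	return 0;
}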
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index b9fca3586d7..347caad47a1 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
- port_settings.port = port->number + 1;
+ port_settings.port = port->number - port->serial->minor + 1;
/* get the byte size */
switch (cflag & CSIZE) {
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index 39ee7373b4e..fca4c752a4e 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -41,9 +41,6 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
int len;
unsigned char *buf;
- if (port->number != 0)
- return -ENODEV;
-
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -53,7 +50,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0001, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd cmd and receive data */
@@ -65,7 +62,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 3 cmd */
@@ -84,7 +81,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
@@ -95,7 +92,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 5 cmd */
@@ -107,7 +104,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 6 cmd */
@@ -126,7 +123,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
kfree(buf);
@@ -166,9 +163,6 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
int len;
unsigned char *buf;
- if (port->number != 0)
- return;
-
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return;
@@ -178,7 +172,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0002, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd ctl cmd (CTL 21 22 03 00 00 00 00 00) */
@@ -186,7 +180,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 3rd cmd and receive data */
@@ -198,7 +192,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
@@ -217,7 +211,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 5 cmd */
@@ -228,7 +222,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 6 cmd */
@@ -240,7 +234,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 7 cmd */
@@ -259,7 +253,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 8 cmd */
@@ -270,7 +264,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
- HZ * USB_CTRL_GET_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
kfree(buf);
@@ -279,11 +273,29 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
}
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */
- { USB_DEVICE(0x19d2, 0xfffe) },
- { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */
+ /* AC8710, AC8710T */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
+ /* AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
+ /* MG880 */
+ { USB_DEVICE(0x19d2, 0xfffd) },
+ { USB_DEVICE(0x19d2, 0xfffc) },
+ { USB_DEVICE(0x19d2, 0xfffb) },
+ /* AC2726, AC8710_V3 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(0x19d2, 0xfff6) },
+ { USB_DEVICE(0x19d2, 0xfff7) },
+ { USB_DEVICE(0x19d2, 0xfff8) },
+ { USB_DEVICE(0x19d2, 0xfff9) },
+ { USB_DEVICE(0x19d2, 0xffee) },
+ /* AC2716, MC2716 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
+ /* AD3812 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(0x19d2, 0xffec) },
{ USB_DEVICE(0x05C6, 0x3197) },
{ USB_DEVICE(0x05C6, 0x6000) },
+ { USB_DEVICE(0x05C6, 0x9008) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 070b5c0ebbf..d9440882cdc 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf,
{
struct us_data *us;
int result;
+ struct usb_device *device;
result = usb_stor_probe1(&us, intf, id,
(id - cypress_usb_ids) + cypress_unusual_dev_list);
if (result)
return result;
- us->protocol_name = "Transparent SCSI with Cypress ATACB";
- us->proto_handler = cypress_atacb_passthrough;
+ /* Among CY7C68300 chips, the A revision does not support Cypress ATACB.
+ * Filter out this revision, which is identified by its default EEPROM
+ * descriptor values.
+ */
+ device = interface_to_usbdev(intf);
+ if (device->descriptor.iManufacturer != 0x38 ||
+ device->descriptor.iProduct != 0x4e ||
+ device->descriptor.iSerialNumber != 0x64) {
+ us->protocol_name = "Transparent SCSI with Cypress ATACB";
+ us->proto_handler = cypress_atacb_passthrough;
+ } else {
+ us->protocol_name = "Transparent SCSI";
+ us->proto_handler = usb_stor_transparent_scsi_command;
+ }
result = usb_stor_probe2(us);
return result;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d640352641c..914793379a4 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -57,6 +57,11 @@ config OF_VIDEOMODE
config HDMI
bool
+config VEXPRESS_DVI_CONTROL
+ bool "Versatile Express DVI control"
+ depends on FB && VEXPRESS_CONFIG
+ default y
+
menuconfig FB
tristate "Support for frame buffer devices"
---help---
@@ -352,6 +357,21 @@ config FB_ARMCLCD
here and read <file:Documentation/kbuild/modules.txt>. The module
will be called amba-clcd.
+config FB_ARMHDLCD
+ tristate "ARM High Definition LCD support"
+ depends on FB && ARM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This framebuffer device driver is for the ARM High Definition
+ Colour LCD controller.
+
+ If you want to compile this as a module (=code which can be
+ inserted into and removed from the running kernel), say M
+ here and read <file:Documentation/kbuild/modules.txt>. The module
+ will be called arm-hdlcd.
+
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9df387334cb..47c3a6bbaa1 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_FB_ARMHDLCD) += arm-hdlcd.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
@@ -175,3 +176,6 @@ obj-$(CONFIG_DISPLAY_TIMING) += display_timing.o
obj-$(CONFIG_OF_DISPLAY_TIMING) += of_display_timing.o
obj-$(CONFIG_VIDEOMODE) += videomode.o
obj-$(CONFIG_OF_VIDEOMODE) += of_videomode.o
+
+# platform specific output drivers
+obj-$(CONFIG_VEXPRESS_DVI_CONTROL) += vexpress-dvi.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 0a2cce7285b..94a1998338d 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -16,7 +16,10 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
+#include <linux/of.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -30,6 +33,16 @@
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
+#ifdef CONFIG_ARM
+#define clcdfb_dma_alloc dma_alloc_writecombine
+#define clcdfb_dma_free dma_free_writecombine
+#define clcdfb_dma_mmap dma_mmap_writecombine
+#else
+#define clcdfb_dma_alloc dma_alloc_coherent
+#define clcdfb_dma_free dma_free_coherent
+#define clcdfb_dma_mmap dma_mmap_coherent
+#endif
+
/* This is limited to 16 characters when displayed by X startup */
static const char *clcd_name = "CLCD FB";
@@ -392,6 +405,44 @@ static int clcdfb_blank(int blank_mode, struct fb_info *info)
return 0;
}
+int clcdfb_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return clcdfb_dma_mmap(&fb->dev->dev, vma,
+ fb->fb.screen_base,
+ fb->fb.fix.smem_start,
+ fb->fb.fix.smem_len);
+}
+
+int clcdfb_mmap_io(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ unsigned long user_count, count, pfn, off;
+
+ user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ count = PAGE_ALIGN(fb->fb.fix.smem_len) >> PAGE_SHIFT;
+ pfn = fb->fb.fix.smem_start >> PAGE_SHIFT;
+ off = vma->vm_pgoff;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (off < count && user_count <= (count - off))
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+
+ return -ENXIO;
+}
+
+void clcdfb_remove_dma(struct clcd_fb *fb)
+{
+ clcdfb_dma_free(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
+
+void clcdfb_remove_io(struct clcd_fb *fb)
+{
+ iounmap(fb->fb.screen_base);
+}
+
static int clcdfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
@@ -542,14 +593,239 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
+struct string_lookup {
+ const char *string;
+ const u32 val;
+};
+
+static struct string_lookup vmode_lookups[] = {
+ { "FB_VMODE_NONINTERLACED", FB_VMODE_NONINTERLACED},
+ { "FB_VMODE_INTERLACED", FB_VMODE_INTERLACED},
+ { "FB_VMODE_DOUBLE", FB_VMODE_DOUBLE},
+ { "FB_VMODE_ODD_FLD_FIRST", FB_VMODE_ODD_FLD_FIRST},
+ { NULL, 0 },
+};
+
+static struct string_lookup tim2_lookups[] = {
+ { "TIM2_CLKSEL", TIM2_CLKSEL},
+ { "TIM2_IVS", TIM2_IVS},
+ { "TIM2_IHS", TIM2_IHS},
+ { "TIM2_IPC", TIM2_IPC},
+ { "TIM2_IOE", TIM2_IOE},
+ { "TIM2_BCD", TIM2_BCD},
+ { NULL, 0},
+};
+static struct string_lookup cntl_lookups[] = {
+ {"CNTL_LCDEN", CNTL_LCDEN},
+ {"CNTL_LCDBPP1", CNTL_LCDBPP1},
+ {"CNTL_LCDBPP2", CNTL_LCDBPP2},
+ {"CNTL_LCDBPP4", CNTL_LCDBPP4},
+ {"CNTL_LCDBPP8", CNTL_LCDBPP8},
+ {"CNTL_LCDBPP16", CNTL_LCDBPP16},
+ {"CNTL_LCDBPP16_565", CNTL_LCDBPP16_565},
+ {"CNTL_LCDBPP16_444", CNTL_LCDBPP16_444},
+ {"CNTL_LCDBPP24", CNTL_LCDBPP24},
+ {"CNTL_LCDBW", CNTL_LCDBW},
+ {"CNTL_LCDTFT", CNTL_LCDTFT},
+ {"CNTL_LCDMONO8", CNTL_LCDMONO8},
+ {"CNTL_LCDDUAL", CNTL_LCDDUAL},
+ {"CNTL_BGR", CNTL_BGR},
+ {"CNTL_BEBO", CNTL_BEBO},
+ {"CNTL_BEPO", CNTL_BEPO},
+ {"CNTL_LCDPWR", CNTL_LCDPWR},
+ {"CNTL_LCDVCOMP(1)", CNTL_LCDVCOMP(1)},
+ {"CNTL_LCDVCOMP(2)", CNTL_LCDVCOMP(2)},
+ {"CNTL_LCDVCOMP(3)", CNTL_LCDVCOMP(3)},
+ {"CNTL_LCDVCOMP(4)", CNTL_LCDVCOMP(4)},
+ {"CNTL_LCDVCOMP(5)", CNTL_LCDVCOMP(5)},
+ {"CNTL_LCDVCOMP(6)", CNTL_LCDVCOMP(6)},
+ {"CNTL_LCDVCOMP(7)", CNTL_LCDVCOMP(7)},
+ {"CNTL_LDMAFIFOTIME", CNTL_LDMAFIFOTIME},
+ {"CNTL_WATERMARK", CNTL_WATERMARK},
+ { NULL, 0},
+};
+static struct string_lookup caps_lookups[] = {
+ {"CLCD_CAP_RGB444", CLCD_CAP_RGB444},
+ {"CLCD_CAP_RGB5551", CLCD_CAP_RGB5551},
+ {"CLCD_CAP_RGB565", CLCD_CAP_RGB565},
+ {"CLCD_CAP_RGB888", CLCD_CAP_RGB888},
+ {"CLCD_CAP_BGR444", CLCD_CAP_BGR444},
+ {"CLCD_CAP_BGR5551", CLCD_CAP_BGR5551},
+ {"CLCD_CAP_BGR565", CLCD_CAP_BGR565},
+ {"CLCD_CAP_BGR888", CLCD_CAP_BGR888},
+ {"CLCD_CAP_444", CLCD_CAP_444},
+ {"CLCD_CAP_5551", CLCD_CAP_5551},
+ {"CLCD_CAP_565", CLCD_CAP_565},
+ {"CLCD_CAP_888", CLCD_CAP_888},
+ {"CLCD_CAP_RGB", CLCD_CAP_RGB},
+ {"CLCD_CAP_BGR", CLCD_CAP_BGR},
+ {"CLCD_CAP_ALL", CLCD_CAP_ALL},
+ { NULL, 0},
+};
+
+u32 parse_setting(struct string_lookup *lookup, const char *name)
+{
+ int i = 0;
+ while (lookup[i].string != NULL) {
+ if (strcmp(lookup[i].string, name) == 0)
+ return lookup[i].val;
+ ++i;
+ }
+ return -EINVAL;
+}
+
+u32 get_string_lookup(struct device_node *node, const char *name,
+ struct string_lookup *lookup)
+{
+ const char *string;
+ int count, i, ret = 0;
+
+ count = of_property_count_strings(node, name);
+ if (count >= 0)
+ for (i = 0; i < count; i++)
+ if (of_property_read_string_index(node, name, i,
+ &string) == 0)
+ ret |= parse_setting(lookup, string);
+ return ret;
+}
+
+int get_val(struct device_node *node, const char *string)
+{
+ u32 ret = 0;
+
+ if (of_property_read_u32(node, string, &ret))
+ ret = -1;
+ return ret;
+}
+
+struct clcd_panel *getPanel(struct device_node *node)
+{
+ static struct clcd_panel panel;
+
+ panel.mode.refresh = get_val(node, "refresh");
+ panel.mode.xres = get_val(node, "xres");
+ panel.mode.yres = get_val(node, "yres");
+ panel.mode.pixclock = get_val(node, "pixclock");
+ panel.mode.left_margin = get_val(node, "left_margin");
+ panel.mode.right_margin = get_val(node, "right_margin");
+ panel.mode.upper_margin = get_val(node, "upper_margin");
+ panel.mode.lower_margin = get_val(node, "lower_margin");
+ panel.mode.hsync_len = get_val(node, "hsync_len");
+ panel.mode.vsync_len = get_val(node, "vsync_len");
+ panel.mode.sync = get_val(node, "sync");
+ panel.bpp = get_val(node, "bpp");
+ panel.width = (signed short) get_val(node, "width");
+ panel.height = (signed short) get_val(node, "height");
+
+ panel.mode.vmode = get_string_lookup(node, "vmode", vmode_lookups);
+ panel.tim2 = get_string_lookup(node, "tim2", tim2_lookups);
+ panel.cntl = get_string_lookup(node, "cntl", cntl_lookups);
+ panel.caps = get_string_lookup(node, "caps", caps_lookups);
+
+ return &panel;
+}
+
+struct clcd_panel *clcdfb_get_panel(const char *name)
+{
+ struct device_node *node = NULL;
+ const char *mode;
+ struct clcd_panel *panel = NULL;
+
+ do {
+ node = of_find_compatible_node(node, NULL, "panel");
+ if (node)
+ if (of_property_read_string(node, "mode", &mode) == 0)
+ if (strcmp(mode, name) == 0) {
+ panel = getPanel(node);
+ panel->mode.name = name;
+ }
+ } while (node != NULL);
+
+ return panel;
+}
+
+#ifdef CONFIG_OF
+static int clcdfb_dt_init(struct clcd_fb *fb)
+{
+ int err = 0;
+ struct device_node *node;
+ const char *mode;
+ dma_addr_t dma;
+ u32 use_dma;
+ const __be32 *prop;
+ int len, na, ns;
+ phys_addr_t fb_base, fb_size;
+
+ node = fb->dev->dev.of_node;
+ if (!node)
+ return -ENODEV;
+
+ na = of_n_addr_cells(node);
+ ns = of_n_size_cells(node);
+
+ if (WARN_ON(of_property_read_string(node, "mode", &mode)))
+ return -ENODEV;
+
+ fb->panel = clcdfb_get_panel(mode);
+ if (!fb->panel)
+ return -EINVAL;
+ fb->fb.fix.smem_len = fb->panel->mode.xres * fb->panel->mode.yres * 2;
+
+ fb->board->name = "Device Tree CLCD PL111";
+ fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_565;
+ fb->board->check = clcdfb_check;
+ fb->board->decode = clcdfb_decode;
+
+ if (of_property_read_u32(node, "use_dma", &use_dma))
+ use_dma = 0;
+
+ if (use_dma) {
+ fb->fb.screen_base = clcdfb_dma_alloc(&fb->dev->dev,
+ fb->fb.fix.smem_len,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base) {
+ pr_err("CLCD: unable to map framebuffer\n");
+ return -ENOMEM;
+ }
+
+ fb->fb.fix.smem_start = dma;
+ fb->board->mmap = clcdfb_mmap_dma;
+ fb->board->remove = clcdfb_remove_dma;
+ } else {
+ prop = of_get_property(node, "framebuffer", &len);
+ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
+ return -EINVAL;
+
+ fb_base = of_read_number(prop, na);
+ fb_size = of_read_number(prop + na, ns);
+
+ fb->fb.fix.smem_start = fb_base;
+ fb->fb.screen_base = ioremap_wc(fb_base, fb_size);
+ fb->board->mmap = clcdfb_mmap_io;
+ fb->board->remove = clcdfb_remove_io;
+ }
+
+ return err;
+}
+#endif /* CONFIG_OF */
+
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;
int ret;
- if (!board)
- return -EINVAL;
+ if (!board) {
+#ifdef CONFIG_OF
+ if (dev->dev.of_node) {
+ board = kzalloc(sizeof(struct clcd_board), GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
+ board->setup = clcdfb_dt_init;
+ } else
+#endif
+ return -EINVAL;
+ }
ret = amba_request_regions(dev, NULL);
if (ret) {
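The device-tree support added to amba-clcd above describes panel timing and control flags as lists of strings, which get_string_lookup() translates and ORs together via small name/value tables. A compilable sketch of that scheme; the table mirrors tim2_lookups, but the bit values are illustrative rather than the real TIM2 encoding:

#include <stdio.h>
#include <string.h>

struct string_lookup {
	const char *string;
	unsigned int val;
};

static const struct string_lookup tim2_lookups[] = {
	{ "TIM2_IVS", 1u << 11 },	/* assumed bit positions */
	{ "TIM2_IHS", 1u << 12 },
	{ "TIM2_IPC", 1u << 13 },
	{ NULL, 0 },
};

static unsigned int parse_setting(const struct string_lookup *lookup,
				  const char *name)
{
	int i;

	for (i = 0; lookup[i].string; i++)
		if (!strcmp(lookup[i].string, name))
			return lookup[i].val;
	return 0;	/* the driver returns -EINVAL here */
}

int main(void)
{
	const char *props[] = { "TIM2_IVS", "TIM2_IHS" };	/* as read from DT */
	unsigned int tim2 = 0;
	int i;

	for (i = 0; i < 2; i++)
		tim2 |= parse_setting(tim2_lookups, props[i]);
	printf("tim2 = 0x%x\n", tim2);
	return 0;
}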
diff --git a/drivers/video/arm-hdlcd.c b/drivers/video/arm-hdlcd.c
new file mode 100644
index 00000000000..f9c4e7490c0
--- /dev/null
+++ b/drivers/video/arm-hdlcd.c
@@ -0,0 +1,839 @@
+/*
+ * drivers/video/arm-hdlcd.c
+ *
+ * Copyright (C) 2011 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * ARM HDLCD Controller
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/fb.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/arm-hdlcd.h>
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
+
+#include "edid.h"
+
+#ifdef CONFIG_SERIAL_AMBA_PCU_UART
+int get_edid(u8 *msgbuf);
+#else
+#endif
+
+#define to_hdlcd_device(info) container_of(info, struct hdlcd_device, fb)
+
+static struct of_device_id hdlcd_of_matches[] = {
+ { .compatible = "arm,hdlcd" },
+ {},
+};
+
+/* Framebuffer size. */
+static unsigned long framebuffer_size;
+
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+static unsigned long buffer_underrun_events;
+static DEFINE_SPINLOCK(hdlcd_underrun_lock);
+
+static void hdlcd_underrun_set(unsigned long val)
+{
+ spin_lock(&hdlcd_underrun_lock);
+ buffer_underrun_events = val;
+ spin_unlock(&hdlcd_underrun_lock);
+}
+
+static unsigned long hdlcd_underrun_get(void)
+{
+ unsigned long val;
+ spin_lock(&hdlcd_underrun_lock);
+ val = buffer_underrun_events;
+ spin_unlock(&hdlcd_underrun_lock);
+ return val;
+}
+
+#ifdef CONFIG_PROC_FS
+static int hdlcd_underrun_show(struct seq_file *m, void *v)
+{
+ unsigned char underrun_string[32];
+ snprintf(underrun_string, 32, "%lu\n", hdlcd_underrun_get());
+ seq_puts(m, underrun_string);
+ return 0;
+}
+
+static int proc_hdlcd_underrun_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hdlcd_underrun_show, NULL);
+}
+
+static const struct file_operations proc_hdlcd_underrun_operations = {
+ .open = proc_hdlcd_underrun_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int hdlcd_underrun_init(void)
+{
+ hdlcd_underrun_set(0);
+ proc_create("hdlcd_underrun", 0, NULL, &proc_hdlcd_underrun_operations);
+ return 0;
+}
+static void hdlcd_underrun_close(void)
+{
+ remove_proc_entry("hdlcd_underrun", NULL);
+}
+#else
+static int hdlcd_underrun_init(void) { return 0; }
+static void hdlcd_underrun_close(void) { }
+#endif
+#endif
+
+static char *fb_mode = "1680x1050-32@60\0\0\0\0\0";
+
+static struct fb_var_screeninfo cached_var_screeninfo;
+
+static struct fb_videomode hdlcd_default_mode = {
+ .refresh = 60,
+ .xres = 1680,
+ .yres = 1050,
+ .pixclock = 8403,
+ .left_margin = 80,
+ .right_margin = 48,
+ .upper_margin = 21,
+ .lower_margin = 3,
+ .hsync_len = 32,
+ .vsync_len = 6,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+};
+
+static inline void hdlcd_enable(struct hdlcd_device *hdlcd)
+{
+ dev_dbg(hdlcd->dev, "HDLCD: output enabled\n");
+ writel(1, hdlcd->base + HDLCD_REG_COMMAND);
+}
+
+static inline void hdlcd_disable(struct hdlcd_device *hdlcd)
+{
+ dev_dbg(hdlcd->dev, "HDLCD: output disabled\n");
+ writel(0, hdlcd->base + HDLCD_REG_COMMAND);
+}
+
+static int hdlcd_set_bitfields(struct hdlcd_device *hdlcd,
+ struct fb_var_screeninfo *var)
+{
+ int ret = 0;
+
+ memset(&var->transp, 0, sizeof(var->transp));
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->blue.offset = 0;
+
+ switch (var->bits_per_pixel) {
+ case 8:
+ /* pseudocolor */
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
+ case 16:
+ /* 565 format */
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ break;
+ case 32:
+ var->transp.length = 8;
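+ /* fall through - 24 and 32 bpp share the 8:8:8 RGB layout */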
+ case 24:
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ if (var->bits_per_pixel != 32) {
+ var->green.offset = var->blue.length;
+ var->red.offset = var->green.offset + var->green.length;
+ } else {
+ /* Previously, the byte ordering for 32-bit color was
+ * (msb)<alpha><red><green><blue>(lsb), but this does not
+ * match what Android expects and the colors come out
+ * wrong. Instead, use <alpha><blue><green><red>. Since
+ * we tell fb what we are doing, console, X and directfb
+ * access should work fine.
+ */
+ var->green.offset = var->red.length;
+ var->blue.offset = var->green.offset + var->green.length;
+ var->transp.offset = var->blue.offset + var->blue.length;
+ }
+ }
+
+ return ret;
+}
+
+static int hdlcd_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+ int bytes_per_pixel = var->bits_per_pixel / 8;
+
+#ifdef HDLCD_NO_VIRTUAL_SCREEN
+ var->yres_virtual = var->yres;
+#else
+ var->yres_virtual = 2 * var->yres;
+#endif
+
+ if ((var->xres_virtual * bytes_per_pixel * var->yres_virtual) > hdlcd->fb.fix.smem_len)
+ return -ENOMEM;
+
+ if (var->xres > HDLCD_MAX_XRES || var->yres > HDLCD_MAX_YRES)
+ return -EINVAL;
+
+ /* make sure the bitfields are set appropriately */
+ return hdlcd_set_bitfields(hdlcd, var);
+}
+
+/* prototype */
+static int hdlcd_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+
+#define WRITE_HDLCD_REG(reg, value) writel((value), hdlcd->base + (reg))
+#define READ_HDLCD_REG(reg) readl(hdlcd->base + (reg))
+
+static int hdlcd_set_par(struct fb_info *info)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+ int bytes_per_pixel = hdlcd->fb.var.bits_per_pixel / 8;
+ int polarities;
+ int old_yoffset;
+
+ /* check for shortcuts */
+ old_yoffset = cached_var_screeninfo.yoffset;
+ cached_var_screeninfo.yoffset = info->var.yoffset;
+ if (!memcmp(&info->var, &cached_var_screeninfo,
+ sizeof(struct fb_var_screeninfo))) {
+ if(old_yoffset != info->var.yoffset) {
+ /* we only changed yoffset, and we already
+ * already recorded it a couple lines up
+ */
+ hdlcd_pan_display(&info->var, info);
+ }
+ /* or no change */
+ return 0;
+ }
+
+ hdlcd->fb.fix.line_length = hdlcd->fb.var.xres * bytes_per_pixel;
+
+ if (hdlcd->fb.var.bits_per_pixel >= 16)
+ hdlcd->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ else
+ hdlcd->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
+
+ memcpy(&cached_var_screeninfo, &info->var, sizeof(struct fb_var_screeninfo));
+
+ polarities = HDLCD_POLARITY_DATAEN |
+#ifndef CONFIG_ARCH_TUSCAN
+ HDLCD_POLARITY_PIXELCLK |
+#endif
+ HDLCD_POLARITY_DATA;
+ polarities |= (hdlcd->fb.var.sync & FB_SYNC_HOR_HIGH_ACT) ? HDLCD_POLARITY_HSYNC : 0;
+ polarities |= (hdlcd->fb.var.sync & FB_SYNC_VERT_HIGH_ACT) ? HDLCD_POLARITY_VSYNC : 0;
+
+ hdlcd_disable(hdlcd);
+
+ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_LENGTH, hdlcd->fb.var.xres * bytes_per_pixel);
+ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_PITCH, hdlcd->fb.var.xres * bytes_per_pixel);
+ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_COUNT, hdlcd->fb.var.yres - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_V_SYNC, hdlcd->fb.var.vsync_len - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_V_BACK_PORCH, hdlcd->fb.var.upper_margin - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_V_DATA, hdlcd->fb.var.yres - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_V_FRONT_PORCH, hdlcd->fb.var.lower_margin - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_H_SYNC, hdlcd->fb.var.hsync_len - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_H_BACK_PORCH, hdlcd->fb.var.left_margin - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_H_DATA, hdlcd->fb.var.xres - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_H_FRONT_PORCH, hdlcd->fb.var.right_margin - 1);
+ WRITE_HDLCD_REG(HDLCD_REG_POLARITIES, polarities);
+ WRITE_HDLCD_REG(HDLCD_REG_PIXEL_FORMAT, (bytes_per_pixel - 1) << 3);
+#ifdef HDLCD_RED_DEFAULT_COLOUR
+ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, (0x00ff0000 | (hdlcd->fb.var.red.length & 0xf) << 8) \
+ | hdlcd->fb.var.red.offset);
+#else
+ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, ((hdlcd->fb.var.red.length & 0xf) << 8) | hdlcd->fb.var.red.offset);
+#endif
+ WRITE_HDLCD_REG(HDLCD_REG_GREEN_SELECT, ((hdlcd->fb.var.green.length & 0xf) << 8) | hdlcd->fb.var.green.offset);
+ WRITE_HDLCD_REG(HDLCD_REG_BLUE_SELECT, ((hdlcd->fb.var.blue.length & 0xf) << 8) | hdlcd->fb.var.blue.offset);
+
+ clk_prepare(hdlcd->clk);
+ clk_set_rate(hdlcd->clk, (1000000000 / hdlcd->fb.var.pixclock) * 1000);
+ clk_enable(hdlcd->clk);
+
+ hdlcd_enable(hdlcd);
+
+ return 0;
+}
+
+static int hdlcd_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp, struct fb_info *info)
+{
+ if (regno < 16) {
+ u32 *pal = info->pseudo_palette;
+
+ pal[regno] = ((red >> 8) << info->var.red.offset) |
+ ((green >> 8) << info->var.green.offset) |
+ ((blue >> 8) << info->var.blue.offset);
+ }
+
+ return 0;
+}
+
+static irqreturn_t hdlcd_irq(int irq, void *data)
+{
+ struct hdlcd_device *hdlcd = data;
+ unsigned long irq_mask, irq_status;
+
+ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
+ irq_status = READ_HDLCD_REG(HDLCD_REG_INT_STATUS);
+
+ /* acknowledge interrupt(s) */
+ WRITE_HDLCD_REG(HDLCD_REG_INT_CLEAR, irq_status);
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+ if (irq_status & HDLCD_INTERRUPT_UNDERRUN) {
+ /* increment the count */
+ hdlcd_underrun_set(hdlcd_underrun_get() + 1);
+ }
+#endif
+ if (irq_status & HDLCD_INTERRUPT_VSYNC) {
+ /* disable future VSYNC interrupts */
+ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask & ~HDLCD_INTERRUPT_VSYNC);
+
+ complete(&hdlcd->vsync_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hdlcd_wait_for_vsync(struct fb_info *info)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+ unsigned long irq_mask;
+ int err;
+
+ /* enable VSYNC interrupt */
+ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
+ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask | HDLCD_INTERRUPT_VSYNC);
+
+ err = wait_for_completion_interruptible_timeout(&hdlcd->vsync_completion,
+ msecs_to_jiffies(100));
+
+ if (!err)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int hdlcd_blank(int blank_mode, struct fb_info *info)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+
+ switch (blank_mode) {
+ case FB_BLANK_POWERDOWN:
+ clk_disable(hdlcd->clk);
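+ /* fall through - powerdown also blanks the display */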
+ case FB_BLANK_NORMAL:
+ hdlcd_disable(hdlcd);
+ break;
+ case FB_BLANK_UNBLANK:
+ clk_enable(hdlcd->clk);
+ hdlcd_enable(hdlcd);
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static void hdlcd_mmap_open(struct vm_area_struct *vma)
+{
+}
+
+static void hdlcd_mmap_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct hdlcd_mmap_ops = {
+ .open = hdlcd_mmap_open,
+ .close = hdlcd_mmap_close,
+};
+
+static int hdlcd_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+ unsigned long off;
+ unsigned long start;
+ unsigned long len = hdlcd->fb.fix.smem_len;
+
+ if (vma->vm_end - vma->vm_start == 0)
+ return 0;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ if ((off >= len) || (vma->vm_end - vma->vm_start + off) > len)
+ return -EINVAL;
+
+ start = hdlcd->fb.fix.smem_start;
+ off += start;
+
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &hdlcd_mmap_ops;
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int hdlcd_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
+
+ hdlcd->fb.var.yoffset = var->yoffset;
+ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start +
+ (var->yoffset * hdlcd->fb.fix.line_length));
+
+ hdlcd_wait_for_vsync(info);
+
+ return 0;
+}
+
+static int hdlcd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ int err;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ err = hdlcd_wait_for_vsync(info);
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+
+ return err;
+}
+
+static struct fb_ops hdlcd_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = hdlcd_check_var,
+ .fb_set_par = hdlcd_set_par,
+ .fb_setcolreg = hdlcd_setcolreg,
+ .fb_blank = hdlcd_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = hdlcd_mmap,
+ .fb_pan_display = hdlcd_pan_display,
+ .fb_ioctl = hdlcd_ioctl,
+ .fb_compat_ioctl = hdlcd_ioctl
+};
+
+static int hdlcd_setup(struct hdlcd_device *hdlcd)
+{
+ u32 version;
+ int err = -EFAULT;
+
+ hdlcd->fb.device = hdlcd->dev;
+
+ hdlcd->clk = clk_get(hdlcd->dev, NULL);
+ if (IS_ERR(hdlcd->clk)) {
+ dev_err(hdlcd->dev, "HDLCD: unable to find clock data\n");
+ return PTR_ERR(hdlcd->clk);
+ }
+
+ hdlcd->base = ioremap_nocache(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
+ if (!hdlcd->base) {
+ dev_err(hdlcd->dev, "HDLCD: unable to map registers\n");
+ goto remap_err;
+ }
+
+ hdlcd->fb.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ if (!hdlcd->fb.pseudo_palette) {
+ dev_err(hdlcd->dev, "HDLCD: unable to allocate pseudo_palette memory\n");
+ err = -ENOMEM;
+ goto kmalloc_err;
+ }
+
+ version = readl(hdlcd->base + HDLCD_REG_VERSION);
+ if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
+ dev_err(hdlcd->dev, "HDLCD: unknown product id: 0x%x\n", version);
+ err = -EINVAL;
+ goto setup_err;
+ }
+ dev_info(hdlcd->dev, "HDLCD: found ARM HDLCD version r%dp%d\n",
+ (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
+ version & HDLCD_VERSION_MINOR_MASK);
+
+ strcpy(hdlcd->fb.fix.id, "hdlcd");
+ hdlcd->fb.fbops = &hdlcd_ops;
+ hdlcd->fb.flags = FBINFO_FLAG_DEFAULT/* | FBINFO_VIRTFB*/;
+
+ hdlcd->fb.fix.type = FB_TYPE_PACKED_PIXELS;
+ hdlcd->fb.fix.type_aux = 0;
+ hdlcd->fb.fix.xpanstep = 0;
+ hdlcd->fb.fix.ypanstep = 1;
+ hdlcd->fb.fix.ywrapstep = 0;
+ hdlcd->fb.fix.accel = FB_ACCEL_NONE;
+
+ hdlcd->fb.var.nonstd = 0;
+ hdlcd->fb.var.activate = FB_ACTIVATE_NOW;
+ hdlcd->fb.var.height = -1;
+ hdlcd->fb.var.width = -1;
+ hdlcd->fb.var.accel_flags = 0;
+
+ init_completion(&hdlcd->vsync_completion);
+
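+ /* prefer monitor-provided timings when EDID data is available */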
+ if (hdlcd->edid) {
+ /* build modedb from EDID */
+ fb_edid_to_monspecs(hdlcd->edid, &hdlcd->fb.monspecs);
+ fb_videomode_to_modelist(hdlcd->fb.monspecs.modedb,
+ hdlcd->fb.monspecs.modedb_len,
+ &hdlcd->fb.modelist);
+ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode,
+ hdlcd->fb.monspecs.modedb,
+ hdlcd->fb.monspecs.modedb_len,
+ &hdlcd_default_mode, 32);
+ } else {
+ hdlcd->fb.monspecs.hfmin = 0;
+ hdlcd->fb.monspecs.hfmax = 100000;
+ hdlcd->fb.monspecs.vfmin = 0;
+ hdlcd->fb.monspecs.vfmax = 400;
+ hdlcd->fb.monspecs.dclkmin = 1000000;
+ hdlcd->fb.monspecs.dclkmax = 100000000;
+ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode, NULL, 0, &hdlcd_default_mode, 32);
+ }
+
+ dev_info(hdlcd->dev, "using %dx%d-%d@%d mode\n", hdlcd->fb.var.xres,
+ hdlcd->fb.var.yres, hdlcd->fb.var.bits_per_pixel,
+ hdlcd->fb.mode ? hdlcd->fb.mode->refresh : 60);
+ hdlcd->fb.var.xres_virtual = hdlcd->fb.var.xres;
+#ifdef HDLCD_NO_VIRTUAL_SCREEN
+ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres;
+#else
+ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres * 2;
+#endif
+
+ /* initialise and set the palette */
+ if (fb_alloc_cmap(&hdlcd->fb.cmap, NR_PALETTE, 0)) {
+ dev_err(hdlcd->dev, "failed to allocate cmap memory\n");
+ err = -ENOMEM;
+ goto setup_err;
+ }
+ fb_set_cmap(&hdlcd->fb.cmap, &hdlcd->fb);
+
+ /* Allow max number of outstanding requests with the largest beat burst */
+ WRITE_HDLCD_REG(HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
+ /* Set the framebuffer base to start of allocated memory */
+ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start);
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+ /* turn on underrun interrupt for counting */
+ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, HDLCD_INTERRUPT_UNDERRUN);
+#else
+ /* Ensure interrupts are disabled */
+ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, 0);
+#endif
+ if (!register_framebuffer(&hdlcd->fb)) {
+ fb_set_var(&hdlcd->fb, &hdlcd->fb.var);
+ clk_enable(hdlcd->clk);
+ return 0;
+ }
+
+ dev_err(hdlcd->dev, "HDLCD: cannot register framebuffer\n");
+
+ fb_dealloc_cmap(&hdlcd->fb.cmap);
+setup_err:
+ kfree(hdlcd->fb.pseudo_palette);
+kmalloc_err:
+ iounmap(hdlcd->base);
+remap_err:
+ clk_put(hdlcd->clk);
+ return err;
+}
+
+static inline unsigned char atohex(u8 data)
+{
+ if (!isxdigit(data))
+ return 0;
+ /* mask off the upper nibble; letters need an extra 9 to reach their hex value */
+ return (data > 0x39) ? ((data & 0xf) + 9) : (data & 0xf);
+}
+
+/* EDID data is passed from the device tree as a literal string containing a
+ hexadecimal dump of the data, possibly interspersed with whitespace */
+static int parse_edid_data(struct hdlcd_device *hdlcd, const u8 *edid_data, int data_len)
+{
+ int i, j;
+
+ if (!edid_data)
+ return -EINVAL;
+
+ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!hdlcd->edid)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < data_len; i++) {
+ if (isspace(edid_data[i]))
+ continue;
+ hdlcd->edid[j++] = atohex(edid_data[i]);
+ if (j >= EDID_LENGTH)
+ break;
+ }
+
+ if (j < EDID_LENGTH) {
+ kfree(hdlcd->edid);
+ hdlcd->edid = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hdlcd_probe(struct platform_device *pdev)
+{
+ int err = 0, i;
+ struct hdlcd_device *hdlcd;
+ struct resource *mem;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+
+ memset(&cached_var_screeninfo, 0, sizeof(struct fb_var_screeninfo));
+
+ dev_dbg(&pdev->dev, "HDLCD: probing\n");
+
+ hdlcd = kzalloc(sizeof(*hdlcd), GFP_KERNEL);
+ if (!hdlcd)
+ return -ENOMEM;
+
+#ifdef CONFIG_OF
+ of_node = pdev->dev.of_node;
+ if (of_node) {
+ int len;
+ const u8 *edid;
+ const __be32 *prop = of_get_property(of_node, "mode", &len);
+ if (prop)
+ strncpy(fb_mode, (char *)prop, len);
+ prop = of_get_property(of_node, "framebuffer", &len);
+ if (prop) {
+ hdlcd->fb.fix.smem_start = of_read_ulong(prop,
+ of_n_addr_cells(of_node));
+ prop += of_n_addr_cells(of_node);
+ framebuffer_size = of_read_ulong(prop,
+ of_n_size_cells(of_node));
+ if (framebuffer_size > HDLCD_MAX_FRAMEBUFFER_SIZE)
+ framebuffer_size = HDLCD_MAX_FRAMEBUFFER_SIZE;
+ dev_dbg(&pdev->dev, "HDLCD: phys_addr = 0x%lx, size = 0x%lx\n",
+ hdlcd->fb.fix.smem_start, framebuffer_size);
+ }
+ edid = of_get_property(of_node, "edid", &len);
+ if (edid) {
+ err = parse_edid_data(hdlcd, edid, len);
+#ifdef CONFIG_SERIAL_AMBA_PCU_UART
+ } else {
+ /* ask the firmware to fetch the EDID */
+ dev_dbg(&pdev->dev, "HDLCD: Requesting EDID data\n");
+ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!hdlcd->edid) {
+ err = -ENOMEM;
+ goto resource_err;
+ }
+ err = get_edid(hdlcd->edid);
+#endif /* CONFIG_SERIAL_AMBA_PCU_UART */
+ }
+ if (err)
+ dev_info(&pdev->dev, "HDLCD: Failed to parse EDID data\n");
+ }
+#endif /* CONFIG_OF */
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "HDLCD: cannot get platform resources\n");
+ err = -EINVAL;
+ goto resource_err;
+ }
+
+ i = platform_get_irq(pdev, 0);
+ if (i < 0) {
+ dev_err(&pdev->dev, "HDLCD: no irq defined for vsync\n");
+ err = -ENOENT;
+ goto resource_err;
+ } else {
+ err = request_irq(i, hdlcd_irq, 0, dev_name(&pdev->dev), hdlcd);
+ if (err) {
+ dev_err(&pdev->dev, "HDLCD: unable to request irq\n");
+ goto resource_err;
+ }
+ hdlcd->irq = i;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev))) {
+ err = -ENXIO;
+ goto request_err;
+ }
+
+ if (!hdlcd->fb.fix.smem_start) {
+ dev_err(&pdev->dev, "platform did not allocate frame buffer memory\n");
+ err = -ENOMEM;
+ goto memalloc_err;
+ }
+ hdlcd->fb.screen_base = ioremap_wc(hdlcd->fb.fix.smem_start, framebuffer_size);
+ if (!hdlcd->fb.screen_base) {
+ dev_err(&pdev->dev, "unable to ioremap framebuffer\n");
+ err = -ENOMEM;
+ goto probe_err;
+ }
+
+ hdlcd->fb.screen_size = framebuffer_size;
+ hdlcd->fb.fix.smem_len = framebuffer_size;
+ hdlcd->fb.fix.mmio_start = mem->start;
+ hdlcd->fb.fix.mmio_len = resource_size(mem);
+
+ /* Clear the framebuffer */
+ memset(hdlcd->fb.screen_base, 0, framebuffer_size);
+
+ hdlcd->dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "HDLCD: framebuffer virt base %p, phys base 0x%lX\n",
+ hdlcd->fb.screen_base, (unsigned long)hdlcd->fb.fix.smem_start);
+
+ err = hdlcd_setup(hdlcd);
+
+ if (err)
+ goto probe_err;
+
+ platform_set_drvdata(pdev, hdlcd);
+ return 0;
+
+probe_err:
+ iounmap(hdlcd->fb.screen_base);
+ memblock_free(hdlcd->fb.fix.smem_start, framebuffer_size);
+
+memalloc_err:
+ release_mem_region(mem->start, resource_size(mem));
+
+request_err:
+ free_irq(hdlcd->irq, hdlcd);
+
+resource_err:
+ kfree(hdlcd);
+
+ return err;
+}
+
+static int hdlcd_remove(struct platform_device *pdev)
+{
+ struct hdlcd_device *hdlcd = platform_get_drvdata(pdev);
+
+ clk_disable(hdlcd->clk);
+ clk_unprepare(hdlcd->clk);
+ clk_put(hdlcd->clk);
+
+ /* unmap memory */
+ iounmap(hdlcd->fb.screen_base);
+ iounmap(hdlcd->base);
+
+ /* deallocate fb memory */
+ fb_dealloc_cmap(&hdlcd->fb.cmap);
+ kfree(hdlcd->fb.pseudo_palette);
+ memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_len);
+ release_mem_region(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
+
+ free_irq(hdlcd->irq, hdlcd);
+ kfree(hdlcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hdlcd_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* not implemented yet */
+ return 0;
+}
+
+static int hdlcd_resume(struct platform_device *pdev)
+{
+ /* not implemented yet */
+ return 0;
+}
+#else
+#define hdlcd_suspend NULL
+#define hdlcd_resume NULL
+#endif
+
+static struct platform_driver hdlcd_driver = {
+ .probe = hdlcd_probe,
+ .remove = hdlcd_remove,
+ .suspend = hdlcd_suspend,
+ .resume = hdlcd_resume,
+ .driver = {
+ .name = "hdlcd",
+ .owner = THIS_MODULE,
+ .of_match_table = hdlcd_of_matches,
+ },
+};
+
+static int __init hdlcd_init(void)
+{
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+ int err = platform_driver_register(&hdlcd_driver);
+ if (!err)
+ hdlcd_underrun_init();
+ return err;
+#else
+ return platform_driver_register(&hdlcd_driver);
+#endif
+}
+
+void __exit hdlcd_exit(void)
+{
+#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
+ hdlcd_underrun_close();
+#endif
+ platform_driver_unregister(&hdlcd_driver);
+}
+
+module_init(hdlcd_init);
+module_exit(hdlcd_exit);
+
+MODULE_AUTHOR("Liviu Dudau");
+MODULE_DESCRIPTION("ARM HDLCD core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3cd67592782..a92783e480e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1228,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
finished:
fbcon_free_font(p, free_font);
+ if (free_font)
+ vc->vc_font.data = NULL;
if (!con_is_bound(&fb_con))
fbcon_exit();
diff --git a/drivers/video/vexpress-dvi.c b/drivers/video/vexpress-dvi.c
new file mode 100644
index 00000000000..f08753450ee
--- /dev/null
+++ b/drivers/video/vexpress-dvi.c
@@ -0,0 +1,220 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#define pr_fmt(fmt) "vexpress-dvi: " fmt
+
+#include <linux/fb.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/vexpress.h>
+
+
+static struct vexpress_config_func *vexpress_dvimode_func;
+
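+/* resolutions understood by the motherboard's DVI mode config function */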
+static struct {
+ u32 xres, yres, mode;
+} vexpress_dvi_dvimodes[] = {
+ { 640, 480, 0 }, /* VGA */
+ { 800, 600, 1 }, /* SVGA */
+ { 1024, 768, 2 }, /* XGA */
+ { 1280, 1024, 3 }, /* SXGA */
+ { 1600, 1200, 4 }, /* UXGA */
+ { 1920, 1080, 5 }, /* HD1080 */
+};
+
+static void vexpress_dvi_mode_set(struct fb_info *info, u32 xres, u32 yres)
+{
+ int err = -ENOENT;
+ int i;
+
+ if (!vexpress_dvimode_func)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(vexpress_dvi_dvimodes); i++) {
+ if (vexpress_dvi_dvimodes[i].xres == xres &&
+ vexpress_dvi_dvimodes[i].yres == yres) {
+ pr_debug("mode: %ux%u = %d\n", xres, yres,
+ vexpress_dvi_dvimodes[i].mode);
+ err = vexpress_config_write(vexpress_dvimode_func, 0,
+ vexpress_dvi_dvimodes[i].mode);
+ break;
+ }
+ }
+
+ if (err)
+ pr_warn("Failed to set %ux%u mode! (%d)\n", xres, yres, err);
+}
+
+
+static struct vexpress_config_func *vexpress_muxfpga_func;
+static int vexpress_dvi_fb = -1;
+
+static int vexpress_dvi_mux_set(struct fb_info *info)
+{
+ int err;
+ u32 site = vexpress_get_site_by_dev(info->device);
+
+ if (!vexpress_muxfpga_func)
+ return -ENXIO;
+
+ err = vexpress_config_write(vexpress_muxfpga_func, 0, site);
+ if (!err) {
+ pr_debug("Selected MUXFPGA input %d (fb%d)\n", site,
+ info->node);
+ vexpress_dvi_fb = info->node;
+ vexpress_dvi_mode_set(info, info->var.xres,
+ info->var.yres);
+ } else {
+ pr_warn("Failed to select MUXFPGA input %d (fb%d)! (%d)\n",
+ site, info->node, err);
+ }
+
+ return err;
+}
+
+static int vexpress_dvi_fb_select(int fb)
+{
+ int err;
+ struct fb_info *info;
+
+ /* fb0 is the default */
+ if (fb < 0)
+ fb = 0;
+
+ info = registered_fb[fb];
+ if (!info || !lock_fb_info(info))
+ return -ENODEV;
+
+ err = vexpress_dvi_mux_set(info);
+
+ unlock_fb_info(info);
+
+ return err;
+}
+
+static ssize_t vexpress_dvi_fb_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", vexpress_dvi_fb);
+}
+
+static ssize_t vexpress_dvi_fb_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ long value;
+ int err = kstrtol(buf, 0, &value);
+
+ if (!err)
+ err = vexpress_dvi_fb_select(value);
+
+ return err ? err : count;
+}
+
+DEVICE_ATTR(fb, S_IRUGO | S_IWUSR, vexpress_dvi_fb_show,
+ vexpress_dvi_fb_store);
+
+
+static int vexpress_dvi_fb_event_notify(struct notifier_block *self,
+ unsigned long action, void *data)
+{
+ struct fb_event *event = data;
+ struct fb_info *info = event->info;
+ struct fb_videomode *mode = event->data;
+
+ switch (action) {
+ case FB_EVENT_FB_REGISTERED:
+ if (vexpress_dvi_fb < 0)
+ vexpress_dvi_mux_set(info);
+ break;
+ case FB_EVENT_MODE_CHANGE:
+ case FB_EVENT_MODE_CHANGE_ALL:
+ if (info->node == vexpress_dvi_fb)
+ vexpress_dvi_mode_set(info, mode->xres, mode->yres);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vexpress_dvi_fb_notifier = {
+ .notifier_call = vexpress_dvi_fb_event_notify,
+};
+static bool vexpress_dvi_fb_notifier_registered;
+
+
+enum vexpress_dvi_func { FUNC_MUXFPGA, FUNC_DVIMODE };
+
+static struct of_device_id vexpress_dvi_of_match[] = {
+ {
+ .compatible = "arm,vexpress-muxfpga",
+ .data = (void *)FUNC_MUXFPGA,
+ }, {
+ .compatible = "arm,vexpress-dvimode",
+ .data = (void *)FUNC_DVIMODE,
+ },
+ {}
+};
+
+static int vexpress_dvi_probe(struct platform_device *pdev)
+{
+ enum vexpress_dvi_func func;
+ const struct of_device_id *match =
+ of_match_device(vexpress_dvi_of_match, &pdev->dev);
+
+ if (match)
+ func = (enum vexpress_dvi_func)match->data;
+ else
+ func = pdev->id_entry->driver_data;
+
+ switch (func) {
+ case FUNC_MUXFPGA:
+ vexpress_muxfpga_func =
+ vexpress_config_func_get_by_dev(&pdev->dev);
+ device_create_file(&pdev->dev, &dev_attr_fb);
+ break;
+ case FUNC_DVIMODE:
+ vexpress_dvimode_func =
+ vexpress_config_func_get_by_dev(&pdev->dev);
+ break;
+ }
+
+ if (!vexpress_dvi_fb_notifier_registered) {
+ fb_register_client(&vexpress_dvi_fb_notifier);
+ vexpress_dvi_fb_notifier_registered = true;
+ }
+
+ vexpress_dvi_fb_select(vexpress_dvi_fb);
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_dvi_id_table[] = {
+ { .name = "vexpress-muxfpga", .driver_data = FUNC_MUXFPGA, },
+ { .name = "vexpress-dvimode", .driver_data = FUNC_DVIMODE, },
+ {}
+};
+
+static struct platform_driver vexpress_dvi_driver = {
+ .probe = vexpress_dvi_probe,
+ .driver = {
+ .name = "vexpress-dvi",
+ .of_match_table = vexpress_dvi_of_match,
+ },
+ .id_table = vexpress_dvi_id_table,
+};
+
+static int __init vexpress_dvi_init(void)
+{
+ return platform_driver_register(&vexpress_dvi_driver);
+}
+device_initcall(vexpress_dvi_init);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 08b48bbf9f4..faf4e189fe4 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -523,6 +523,7 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
int err, devno;
if (watchdog->id == 0) {
+ old_wdd = watchdog;
watchdog_miscdev.parent = watchdog->parent;
err = misc_register(&watchdog_miscdev);
if (err != 0) {
@@ -531,9 +532,9 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
if (err == -EBUSY)
pr_err("%s: a legacy watchdog module is probably present.\n",
watchdog->info->identity);
+ old_wdd = NULL;
return err;
}
- old_wdd = watchdog;
}
/* Fill in the data structures */
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 01443ce43ee..13ddec92341 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
/* This is an autofs submount, we can't expire it */
if (autofs_type_indirect(sbi->type))
goto done;
-
- /*
- * Otherwise it's an offset mount and we need to check
- * if we can umount its mount, if there is one.
- */
- if (!d_mountpoint(path.dentry)) {
- status = 0;
- goto done;
- }
}
/* Update the expiry counter if fs is busy */
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index b7a0641ead7..116abec7a29 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -40,16 +40,19 @@ struct kmem_cache *btrfs_delayed_extent_op_cachep;
* compare two delayed tree backrefs with same bytenr and type
*/
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
- struct btrfs_delayed_tree_ref *ref1)
+ struct btrfs_delayed_tree_ref *ref1, int type)
{
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
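+ /* tree block refs are keyed by root; shared block refs by parent */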
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (ref1->root < ref2->root)
+ return -1;
+ if (ref1->root > ref2->root)
+ return 1;
+ } else {
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ }
return 0;
}
@@ -113,7 +116,8 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
- btrfs_delayed_node_to_tree_ref(ref1));
+ btrfs_delayed_node_to_tree_ref(ref1),
+ ref1->type);
} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 09c58a35b42..cc6ce3e58af 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6502,7 +6502,9 @@ out:
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 offset, u64 len)
+ struct inode *inode, u64 offset, u64 *len,
+ u64 *orig_start, u64 *orig_block_len,
+ u64 *ram_bytes)
{
struct btrfs_path *path;
int ret;
@@ -6559,8 +6561,12 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
backref_offset = btrfs_file_extent_offset(leaf, fi);
+ *orig_start = key.offset - backref_offset;
+ *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (extent_end < offset + len) {
+ if (extent_end < offset + *len) {
/* extent doesn't include our full range, must cow */
goto out;
}
@@ -6584,13 +6590,14 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
- num_bytes = min(offset + len, extent_end) - offset;
+ num_bytes = min(offset + *len, extent_end) - offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
+ *len = num_bytes;
ret = 1;
out:
btrfs_free_path(path);
@@ -6789,7 +6796,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
em->block_start != EXTENT_MAP_HOLE)) {
int type;
int ret;
- u64 block_start;
+ u64 block_start, orig_start, orig_block_len, ram_bytes;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
@@ -6807,10 +6814,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (IS_ERR(trans))
goto must_cow;
- if (can_nocow_odirect(trans, inode, start, len) == 1) {
- u64 orig_start = em->orig_start;
- u64 orig_block_len = em->orig_block_len;
-
+ if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
+ &orig_block_len, &ram_bytes) == 1) {
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2c02310ff2d..f49b62f833b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1796,7 +1796,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
item_off = btrfs_item_ptr_offset(leaf, i);
item_len = btrfs_item_size_nr(leaf, i);
- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ btrfs_item_key_to_cpu(leaf, key, i);
+ if (!key_in_sk(key, sk))
+ continue;
+
+ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
item_len = 0;
if (sizeof(sh) + item_len + *sk_offset >
@@ -1805,10 +1809,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
goto overflow;
}
- btrfs_item_key_to_cpu(leaf, key, i);
- if (!key_in_sk(key, sk))
- continue;
-
sh.objectid = key->objectid;
sh.offset = key->offset;
sh.type = key->type;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 210fce2df30..47c1155fac7 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/fs.h>
+#include <linux/inet.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifsfs.h"
@@ -150,7 +151,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
* assuming that we have 'unc=' and 'ip=' in
* the original sb_mountdata
*/
- md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
+ md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 +
+ INET6_ADDRSTRLEN;
mountdata = kzalloc(md_len+1, GFP_KERNEL);
if (mountdata == NULL) {
rc = -ENOMEM;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 21b3a291c32..6c4b4806404 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3332,8 +3332,8 @@ build_unc_path_to_root(const struct smb_vol *vol,
pos = full_path + unc_len;
if (pplen) {
- *pos++ = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos, vol->prepath, pplen);
+ *pos = CIFS_DIR_SEP(cifs_sb);
+ strncpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 20887bf6312..cb8842944f9 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -169,7 +169,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
inode->i_flags |= S_AUTOMOUNT;
- cifs_set_ops(inode);
+ if (inode->i_state & I_NEW)
+ cifs_set_ops(inode);
}
void
diff --git a/fs/dcache.c b/fs/dcache.c
index e8bc3420d63..e689268046c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1230,8 +1230,10 @@ void shrink_dcache_parent(struct dentry * parent)
LIST_HEAD(dispose);
int found;
- while ((found = select_parent(parent, &dispose)) != 0)
+ while ((found = select_parent(parent, &dispose)) != 0) {
shrink_dentry_list(&dispose);
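+ /* yield between batches so a huge dentry tree can't hog the CPU */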
+ cond_resched();
+ }
}
EXPORT_SYMBOL(shrink_dcache_parent);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 63b1f54b6a1..3d7fb593f08 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -294,6 +294,12 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
+ int rc;
+
+ rc = filemap_write_and_wait(file->f_mapping);
+ if (rc)
+ return rc;
+
return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
diff --git a/fs/exec.c b/fs/exec.c
index a96a4885bbb..6d56ff2d578 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* when the old and new regions overlap clear from new_end.
*/
free_pgd_range(&tlb, new_end, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : 0);
+ vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
} else {
/*
* otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* for the others its just a little faster.
*/
free_pgd_range(&tlb, old_start, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : 0);
+ vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
}
tlb_finish_mmu(&tlb, new_end, old_end);
@@ -898,11 +898,13 @@ static int de_thread(struct task_struct *tsk)
sig->notify_count = -1; /* for exit_notify() */
for (;;) {
+ threadgroup_change_begin(tsk);
write_lock_irq(&tasklist_lock);
if (likely(leader->exit_state))
break;
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
+ threadgroup_change_end(tsk);
schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
@@ -960,6 +962,7 @@ static int de_thread(struct task_struct *tsk)
if (unlikely(leader->ptrace))
__wake_up_parent(leader, leader->parent);
write_unlock_irq(&tasklist_lock);
+ threadgroup_change_end(tsk);
release_task(leader);
}
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 987358740cb..efea5d5c44c 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -71,4 +71,5 @@ config EXT4_DEBUG
Enables run-time debugging support for the ext4 filesystem.
If you select Y here, then you will be able to turn on debugging
- with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug"
+ with a command such as:
+ echo 1 > /sys/module/ext4/parameters/mballoc_debug
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 4c216b1bf20..aeed0bac693 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -194,16 +194,20 @@ static inline void ext4_journal_callback_add(handle_t *handle,
* ext4_journal_callback_del: delete a registered callback
* @handle: active journal transaction handle on which callback was registered
* @jce: registered journal callback entry to unregister
+ * Return true if the object was successfully removed
*/
-static inline void ext4_journal_callback_del(handle_t *handle,
+static inline bool ext4_journal_callback_try_del(handle_t *handle,
struct ext4_journal_cb_entry *jce)
{
+ bool deleted;
struct ext4_sb_info *sbi =
EXT4_SB(handle->h_transaction->t_journal->j_private);
spin_lock(&sbi->s_md_lock);
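+ /* an empty jce_list means the callback has already run
+ * or the entry was already removed */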
+ deleted = !list_empty(&jce->jce_list);
list_del_init(&jce->jce_list);
spin_unlock(&sbi->s_md_lock);
+ return deleted;
}
int
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 3278e64e57b..e0ba8a408de 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -166,8 +166,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_trans_will_send_data_barrier(journal, commit_tid))
needs_barrier = true;
- jbd2_log_start_commit(journal, commit_tid);
- ret = jbd2_log_wait_commit(journal, commit_tid);
+ ret = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
if (!ret)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b3a5213bc73..d69e9540cd2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -55,21 +55,21 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
__u16 csum_hi = 0;
__u32 csum;
- csum_lo = raw->i_checksum_lo;
+ csum_lo = le16_to_cpu(raw->i_checksum_lo);
raw->i_checksum_lo = 0;
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum_hi = raw->i_checksum_hi;
+ csum_hi = le16_to_cpu(raw->i_checksum_hi);
raw->i_checksum_hi = 0;
}
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
EXT4_INODE_SIZE(inode->i_sb));
- raw->i_checksum_lo = csum_lo;
+ raw->i_checksum_lo = cpu_to_le16(csum_lo);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
- raw->i_checksum_hi = csum_hi;
+ raw->i_checksum_hi = cpu_to_le16(csum_hi);
return csum;
}
@@ -210,8 +210,7 @@ void ext4_evict_inode(struct inode *inode)
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
- jbd2_log_start_commit(journal, commit_tid);
- jbd2_log_wait_commit(journal, commit_tid);
+ jbd2_complete_transaction(journal, commit_tid);
filemap_write_and_wait(&inode->i_data);
}
truncate_inode_pages(&inode->i_data, 0);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index ee6614bdb63..f3190ab4178 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1994,7 +1994,11 @@ repeat:
group = ac->ac_g_ex.fe_group;
for (i = 0; i < ngroups; group++, i++) {
- if (group == ngroups)
+ /*
+ * ngroups is artificially restricted for non-extent files,
+ * so group > ngroups is possible on the first iteration.
+ */
+ if (group >= ngroups)
group = 0;
/* This now checks without needing the buddy page */
@@ -4420,11 +4424,11 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
node = rb_prev(new_node);
if (node) {
entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(entry, new_entry)) {
+ if (can_merge(entry, new_entry) &&
+ ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
new_entry->efd_start_cluster = entry->efd_start_cluster;
new_entry->efd_count += entry->efd_count;
rb_erase(node, &(db->bb_free_root));
- ext4_journal_callback_del(handle, &entry->efd_jce);
kmem_cache_free(ext4_free_data_cachep, entry);
}
}
@@ -4432,10 +4436,10 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
node = rb_next(new_node);
if (node) {
entry = rb_entry(node, struct ext4_free_data, efd_node);
- if (can_merge(new_entry, entry)) {
+ if (can_merge(new_entry, entry) &&
+ ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
new_entry->efd_count += entry->efd_count;
rb_erase(node, &(db->bb_free_root));
- ext4_journal_callback_del(handle, &entry->efd_jce);
kmem_cache_free(ext4_free_data_cachep, entry);
}
}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index f9b551561d2..b3b1f7d9944 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -7,7 +7,7 @@
#include "ext4.h"
/* Checksumming functions */
-static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
int offset = offsetof(struct mmp_struct, mmp_checksum);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c169477a62c..3beae6a3ef2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1341,6 +1341,8 @@ static void ext4_update_super(struct super_block *sb,
/* Update the global fs size fields */
sbi->s_groups_count += flex_gd->count;
+ sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
+ (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
/* Update the reserved block counts only once the new group is
* active. */
@@ -1880,6 +1882,10 @@ retry:
return 0;
ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+ if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ ext4_warning(sb, "resize would cause inodes_count overflow");
+ return -EINVAL;
+ }
ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
n_desc_blocks = num_desc_blocks(sb, n_group + 1);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5d6d5357812..febbe0e1802 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -353,10 +353,13 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
int error = is_journal_aborted(journal);
- struct ext4_journal_cb_entry *jce, *tmp;
+ struct ext4_journal_cb_entry *jce;
+ BUG_ON(txn->t_state == T_FINISHED);
spin_lock(&sbi->s_md_lock);
- list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) {
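+ /* s_md_lock is dropped around each callback, so a prefetched
+ * next pointer could go stale; re-read the list head instead */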
+ while (!list_empty(&txn->t_private_list)) {
+ jce = list_entry(txn->t_private_list.next,
+ struct ext4_journal_cb_entry, jce_list);
list_del_init(&jce->jce_list);
spin_unlock(&sbi->s_md_lock);
jce->jce_func(sb, jce, error);
@@ -3698,6 +3701,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_err_report.function = print_daily_error_info;
sbi->s_err_report.data = (unsigned long) sb;
+ /* Register extent status tree shrinker */
+ ext4_es_register_shrinker(sb);
+
err = percpu_counter_init(&sbi->s_freeclusters_counter,
ext4_count_free_clusters(sb));
if (!err) {
@@ -3723,9 +3729,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_max_writeback_mb_bump = 128;
sbi->s_extent_max_zeroout_kb = 32;
- /* Register extent status tree shrinker */
- ext4_es_register_shrinker(sb);
-
/*
* set up enough so that it can read an inode
*/
@@ -4010,6 +4013,7 @@ failed_mount_wq:
sbi->s_journal = NULL;
}
failed_mount3:
+ ext4_es_unregister_shrinker(sb);
del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5d8c498e800..5111c429466 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1223,6 +1223,19 @@ static int fat_read_root(struct inode *inode)
return 0;
}
+static unsigned long calc_fat_clusters(struct super_block *sb)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+ /* Divide first to avoid overflow */
+ if (sbi->fat_bits != 12) {
+ unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
+ return ent_per_sec * sbi->fat_length;
+ }
+
+ return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+}
+
/*
* Read the super block of an MS-DOS FS.
*/
@@ -1436,7 +1449,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
/* check that FAT table does not overflow */
- fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+ fat_clusters = calc_fat_clusters(sb);
total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
if (total_clusters > MAX_FAT(sb)) {
if (!silent)
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 8179e8bc4a3..40d13c70ef5 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -287,5 +287,5 @@ const struct file_operations fscache_stats_fops = {
.open = fscache_stats_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ff15522481d..185c47987be 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
struct inode *inode;
+ struct dentry *parent;
+ struct fuse_conn *fc;
inode = ACCESS_ONCE(entry->d_inode);
if (inode && is_bad_inode(inode))
@@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
else if (fuse_dentry_time(entry) < get_jiffies_64()) {
int err;
struct fuse_entry_out outarg;
- struct fuse_conn *fc;
struct fuse_req *req;
struct fuse_forget_link *forget;
- struct dentry *parent;
u64 attr_version;
/* For negative dentries, always do a fresh lookup */
@@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
entry_attr_timeout(&outarg),
attr_version);
fuse_change_entry_timeout(entry, &outarg);
+ } else if (inode) {
+ fc = get_fuse_conn(inode);
+ if (fc->readdirplus_auto) {
+ parent = dget_parent(entry);
+ fuse_advise_use_readdirplus(parent->d_inode);
+ dput(parent);
+ }
}
- fuse_advise_use_readdirplus(inode);
return 1;
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 137185c3884..a215d22fb3a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -864,10 +864,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->dont_mask = 1;
if (arg->flags & FUSE_AUTO_INVAL_DATA)
fc->auto_inval_data = 1;
- if (arg->flags & FUSE_DO_READDIRPLUS)
+ if (arg->flags & FUSE_DO_READDIRPLUS) {
fc->do_readdirplus = 1;
- if (arg->flags & FUSE_READDIRPLUS_AUTO)
- fc->readdirplus_auto = 1;
+ if (arg->flags & FUSE_READDIRPLUS_AUTO)
+ fc->readdirplus_auto = 1;
+ }
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 9f9dbeceeee..5d325c5a81e 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -109,10 +109,14 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
+ hpfs_lock(inode->i_sb);
+
if (to > inode->i_size) {
truncate_pagecache(inode, to, inode->i_size);
hpfs_truncate(inode);
}
+
+ hpfs_unlock(inode->i_sb);
}
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 523464e6284..a3f868ae3fd 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -909,11 +909,8 @@ static int can_do_hugetlb_shm(void)
static int get_hstate_idx(int page_size_log)
{
- struct hstate *h;
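+ /* hstate_sizelog() maps page_size_log == 0 to the default hstate */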
+ struct hstate *h = hstate_sizelog(page_size_log);
- if (!page_size_log)
- return default_hstate_idx;
- h = size_to_hstate(1 << page_size_log);
if (!h)
return -1;
return h - hstates;
@@ -929,9 +926,12 @@ static struct dentry_operations anon_ops = {
.d_dname = hugetlb_dname
};
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
- size_t size, vm_flags_t acctflag,
- struct user_struct **user,
+/*
+ * Note that the size should be aligned to the proper hugepage size by the
+ * caller, otherwise hugetlb_reserve_pages() reserves one fewer hugepage
+ * than intended.
+ */
+struct file *hugetlb_file_setup(const char *name, size_t size,
+ vm_flags_t acctflag, struct user_struct **user,
int creat_flags, int page_size_log)
{
struct file *file = ERR_PTR(-ENOMEM);
@@ -939,8 +939,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
struct path path;
struct super_block *sb;
struct qstr quick_string;
- struct hstate *hstate;
- unsigned long num_pages;
int hstate_idx;
hstate_idx = get_hstate_idx(page_size_log);
@@ -980,12 +978,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
if (!inode)
goto out_dentry;
- hstate = hstate_inode(inode);
- size += addr & ~huge_page_mask(hstate);
- num_pages = ALIGN(size, huge_page_size(hstate)) >>
- huge_page_shift(hstate);
file = ERR_PTR(-ENOMEM);
- if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
+ if (hugetlb_reserve_pages(inode, 0,
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
goto out_inode;
d_instantiate(path.dentry, inode);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 750c70148ef..0f53946f13c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -382,7 +382,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
int space_left = 0;
int first_tag = 0;
int tag_flag;
- int i, to_free = 0;
+ int i;
int tag_bytes = journal_tag_bytes(journal);
struct buffer_head *cbh = NULL; /* For transactional checksums */
__u32 crc32_sum = ~0;
@@ -1134,7 +1134,7 @@ restart_loop:
journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
spin_unlock(&journal->j_history_lock);
- commit_transaction->t_state = T_FINISHED;
+ commit_transaction->t_state = T_COMMIT_CALLBACK;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
journal->j_commit_sequence = commit_transaction->t_tid;
journal->j_committing_transaction = NULL;
@@ -1149,38 +1149,44 @@ restart_loop:
journal->j_average_commit_time*3) / 4;
else
journal->j_average_commit_time = commit_time;
+
write_unlock(&journal->j_state_lock);
- if (commit_transaction->t_checkpoint_list == NULL &&
- commit_transaction->t_checkpoint_io_list == NULL) {
- __jbd2_journal_drop_transaction(journal, commit_transaction);
- to_free = 1;
+ if (journal->j_checkpoint_transactions == NULL) {
+ journal->j_checkpoint_transactions = commit_transaction;
+ commit_transaction->t_cpnext = commit_transaction;
+ commit_transaction->t_cpprev = commit_transaction;
} else {
- if (journal->j_checkpoint_transactions == NULL) {
- journal->j_checkpoint_transactions = commit_transaction;
- commit_transaction->t_cpnext = commit_transaction;
- commit_transaction->t_cpprev = commit_transaction;
- } else {
- commit_transaction->t_cpnext =
- journal->j_checkpoint_transactions;
- commit_transaction->t_cpprev =
- commit_transaction->t_cpnext->t_cpprev;
- commit_transaction->t_cpnext->t_cpprev =
- commit_transaction;
- commit_transaction->t_cpprev->t_cpnext =
+ commit_transaction->t_cpnext =
+ journal->j_checkpoint_transactions;
+ commit_transaction->t_cpprev =
+ commit_transaction->t_cpnext->t_cpprev;
+ commit_transaction->t_cpnext->t_cpprev =
+ commit_transaction;
+ commit_transaction->t_cpprev->t_cpnext =
commit_transaction;
- }
}
spin_unlock(&journal->j_list_lock);
-
+ /* Drop all spin_locks because the commit callback may block.
+ * __journal_remove_checkpoint() cannot destroy the transaction
+ * under us because it is not marked as T_FINISHED yet. */
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
- if (to_free)
- jbd2_journal_free_transaction(commit_transaction);
+ write_lock(&journal->j_state_lock);
+ spin_lock(&journal->j_list_lock);
+ commit_transaction->t_state = T_FINISHED;
+ /* Recheck checkpoint lists after j_list_lock was dropped */
+ if (commit_transaction->t_checkpoint_list == NULL &&
+ commit_transaction->t_checkpoint_io_list == NULL) {
+ __jbd2_journal_drop_transaction(journal, commit_transaction);
+ jbd2_journal_free_transaction(commit_transaction);
+ }
+ spin_unlock(&journal->j_list_lock);
+ write_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_done_commit);
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ed10991ab00..886ec2faa9b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -710,6 +710,37 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
/*
+ * When this function returns, the transaction corresponding to tid
+ * will be completed. If the transaction is currently running, start
+ * committing it before waiting for it to complete. If the
+ * transaction id is stale, it is by definition already completed,
+ * so just return SUCCESS.
+ */
+int jbd2_complete_transaction(journal_t *journal, tid_t tid)
+{
+ int need_to_wait = 1;
+
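+ /* under j_state_lock, decide whether tid is still running,
+ * currently committing, or already complete */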
+ read_lock(&journal->j_state_lock);
+ if (journal->j_running_transaction &&
+ journal->j_running_transaction->t_tid == tid) {
+ if (journal->j_commit_request != tid) {
+ /* transaction not yet started, so request it */
+ read_unlock(&journal->j_state_lock);
+ jbd2_log_start_commit(journal, tid);
+ goto wait_commit;
+ }
+ } else if (!(journal->j_committing_transaction &&
+ journal->j_committing_transaction->t_tid == tid))
+ need_to_wait = 0;
+ read_unlock(&journal->j_state_lock);
+ if (!need_to_wait)
+ return 0;
+wait_commit:
+ return jbd2_log_wait_commit(journal, tid);
+}
+EXPORT_SYMBOL(jbd2_complete_transaction);
+
+/*
* Log buffer allocation routines:
*/
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b7dc47ba675..77554b61d12 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int wait = wbc->sync_mode == WB_SYNC_ALL;
- if (test_cflag(COMMIT_Nolink, inode))
+ if (inode->i_nlink == 0)
return 0;
/*
* If COMMIT_DIRTY is not set, the inode isn't really dirty.
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 2eb952c41a6..cbe48ea9318 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1058,7 +1058,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
*/
void jfs_syncpt(struct jfs_log *log, int hard_sync)
{ LOG_LOCK(log);
- lmLogSync(log, hard_sync);
+ if (!test_bit(log_QUIESCE, &log->flag))
+ lmLogSync(log, hard_sync);
LOG_UNLOCK(log);
}
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 0796c45d0d4..01bfe766275 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -144,6 +144,9 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
timeout);
if (ret < 0)
return -ERESTARTSYS;
+ /* Reset the lock status after a server reboot so we resend */
+ if (block->b_status == nlm_lck_denied_grace_period)
+ block->b_status = nlm_lck_blocked;
req->a_res.status = block->b_status;
return 0;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 7e529c3c45c..9760ecb9b60 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -550,9 +550,6 @@ again:
status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
if (status < 0)
break;
- /* Resend the blocking lock request after a server reboot */
- if (resp->status == nlm_lck_denied_grace_period)
- continue;
if (resp->status != nlm_lck_blocked)
break;
}
diff --git a/fs/namei.c b/fs/namei.c
index 57ae9c8c66b..85e40d1c0a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
- audit_inode(name, dir, 0);
+ audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
if (nd->last.name[nd->last.len])
diff --git a/fs/namespace.c b/fs/namespace.c
index 341d3f56408..e945b81be7d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2238,12 +2238,11 @@ long do_mount(const char *dev_name, const char *dir_name,
retval = security_sb_mount(dev_name, &path,
type_page, flags, data_page);
+ if (!retval && !may_mount())
+ retval = -EPERM;
if (retval)
goto dput_out;
- if (!may_mount())
- return -EPERM;
-
/* Default to relatime unless overriden */
if (!(flags & MS_NOATIME))
mnt_flags |= MNT_RELATIME;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0ad025eb523..261e9b9912f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1022,7 +1022,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
- int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
+ int open_mode = opendata->o_arg.open_flags;
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;
@@ -1380,6 +1380,12 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -ENOMEM:
err = 0;
goto out;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ set_bit(NFS_DELEGATED_STATE, &state->flags);
+ ssleep(1);
+ err = -EAGAIN;
+ goto out;
}
set_bit(NFS_DELEGATED_STATE, &state->flags);
err = nfs4_handle_exception(server, err, &exception);
@@ -4547,9 +4553,9 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
if (status != 0)
goto out;
/* Is this a delegated lock? */
- if (test_bit(NFS_DELEGATED_STATE, &state->flags))
- goto out;
lsp = request->fl_u.nfs4_fl.owner;
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
+ goto out;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
@@ -5025,6 +5031,12 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
nfs4_schedule_stateid_recovery(server, state);
err = 0;
goto out;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ set_bit(NFS_DELEGATED_STATE, &state->flags);
+ ssleep(1);
+ err = -EAGAIN;
+ goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ae73175e6e6..d401d012f14 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -271,6 +271,7 @@ static __be32
do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
__be32 status;
+ int accmode = 0;
/* We don't know the target directory, and therefore can not
* set the change info
@@ -284,9 +285,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
(open->op_iattr.ia_size == 0);
+ /*
+ * In the delegation case, the client is telling us about an
+ * open that it *already* performed locally, some time ago. We
+ * should let it succeed now if possible.
+ *
+ * In the case of a CLAIM_FH open, on the other hand, the client
+ * may be counting on us to enforce permissions (the Linux 4.1
+ * client uses this for normal opens, for example).
+ */
+ if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
+ accmode = NFSD_MAY_OWNER_OVERRIDE;
- status = do_open_permission(rqstp, current_fh, open,
- NFSD_MAY_OWNER_OVERRIDE);
+ status = do_open_permission(rqstp, current_fh, open, accmode);
return status;
}
@@ -931,14 +942,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
cstate, stateid, WR_STATE, &filp);
- if (filp)
- get_file(filp);
- nfs4_unlock_state();
-
if (status) {
+ nfs4_unlock_state();
dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
return status;
}
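+ /* grab a file reference before dropping the state lock */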
+ if (filp)
+ get_file(filp);
+ nfs4_unlock_state();
cnt = write->wr_buflen;
write->wr_how_written = write->wr_stable_how;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 899ca26dd19..4e9a21db867 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -146,7 +146,7 @@ out_no_tfm:
* then disable recovery tracking.
*/
static void
-legacy_recdir_name_error(int error)
+legacy_recdir_name_error(struct nfs4_client *clp, int error)
{
printk(KERN_ERR "NFSD: unable to generate recoverydir "
"name (%d).\n", error);
@@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
if (error == -ENOENT) {
printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
"Reboot recovery will not function correctly!\n");
-
- /* the argument is ignored by the legacy exit function */
- nfsd4_client_tracking_exit(NULL);
+ nfsd4_client_tracking_exit(clp->net);
}
}
@@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
- return legacy_recdir_name_error(status);
+ return legacy_recdir_name_error(clp, status);
status = nfs4_save_creds(&original_cred);
if (status < 0)
@@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status)
- return legacy_recdir_name_error(status);
+ return legacy_recdir_name_error(clp, status);
status = mnt_want_write_file(nn->rec_file);
if (status)
@@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
if (status) {
- legacy_recdir_name_error(status);
+ legacy_recdir_name_error(clp, status);
return status;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2e27430b907..f9a5e62b768 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -210,13 +210,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
if (atomic_dec_and_test(&fp->fi_access[oflag])) {
nfs4_file_put_fd(fp, oflag);
- /*
- * It's also safe to get rid of the RDWR open *if*
- * we no longer have need of the other kind of access
- * or if we already have the other kind of open:
- */
- if (fp->fi_fds[1-oflag]
- || atomic_read(&fp->fi_access[1 - oflag]) == 0)
+ if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
nfs4_file_put_fd(fp, O_RDWR);
}
}
@@ -267,7 +261,7 @@ kmem_cache *slab)
min_stateid = 0;
return stid;
out_free:
- kfree(stid);
+ kmem_cache_free(slab, stid);
return NULL;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a2720071f28..6eb0dc55709 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -344,10 +344,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
all 32 bits of 'nseconds'. */
READ_BUF(12);
len += 12;
- READ32(dummy32);
- if (dummy32)
- return nfserr_inval;
- READ32(iattr->ia_atime.tv_sec);
+ READ64(iattr->ia_atime.tv_sec);
READ32(iattr->ia_atime.tv_nsec);
if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
return nfserr_inval;
@@ -370,10 +367,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
all 32 bits of 'nseconds'. */
READ_BUF(12);
len += 12;
- READ32(dummy32);
- if (dummy32)
- return nfserr_inval;
- READ32(iattr->ia_mtime.tv_sec);
+ READ64(iattr->ia_mtime.tv_sec);
READ32(iattr->ia_mtime.tv_nsec);
if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
return nfserr_inval;
@@ -2401,8 +2395,7 @@ out_acl:
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
if ((buflen -= 12) < 0)
goto out_resource;
- WRITE32(0);
- WRITE32(stat.atime.tv_sec);
+ WRITE64((s64)stat.atime.tv_sec);
WRITE32(stat.atime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
@@ -2415,15 +2408,13 @@ out_acl:
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
if ((buflen -= 12) < 0)
goto out_resource;
- WRITE32(0);
- WRITE32(stat.ctime.tv_sec);
+ WRITE64((s64)stat.ctime.tv_sec);
WRITE32(stat.ctime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
if ((buflen -= 12) < 0)
goto out_resource;
- WRITE32(0);
- WRITE32(stat.mtime.tv_sec);
+ WRITE64((s64)stat.mtime.tv_sec);
WRITE32(stat.mtime.tv_nsec);
}
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 6b49f14eac8..734c93f39b9 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -202,13 +202,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
static int nilfs_set_page_dirty(struct page *page)
{
- int ret = __set_page_dirty_buffers(page);
+ int ret = __set_page_dirty_nobuffers(page);
- if (ret) {
+ if (page_has_buffers(page)) {
struct inode *inode = page->mapping->host;
- unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+ unsigned nr_dirty = 0;
+ struct buffer_head *bh, *head;
- nilfs_set_file_dirty(inode, nr_dirty);
+ /*
+ * This page is locked by its callers, and no other thread
+ * concurrently marks its buffers dirty: they are only
+ * dirtied through routines in fs/buffer.c, where the
+ * call sites of mark_buffer_dirty() are protected by
+ * the page lock.
+ */
+ bh = head = page_buffers(page);
+ do {
+ /* Do not mark hole blocks dirty */
+ if (buffer_dirty(bh) || !buffer_mapped(bh))
+ continue;
+
+ set_buffer_dirty(bh);
+ nr_dirty++;
+ } while (bh = bh->b_this_page, bh != head);
+
+ if (nr_dirty)
+ nilfs_set_file_dirty(inode, nr_dirty);
}
return ret;
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e0f7c1241a6..5fe21d69b81 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -572,7 +572,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
int add = (arg & IN_MASK_ADD);
int ret;
- /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
fsn_mark = fsnotify_find_inode_mark(group, inode);
@@ -623,7 +622,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
- /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
@@ -751,6 +749,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
int ret;
unsigned flags = 0;
+ /* don't allow invalid bits: we don't want flags set */
+ if (unlikely(!(mask & ALL_INOTIFY_BITS)))
+ return -EINVAL;
+
f = fdget(fd);
if (unlikely(!f.file))
return -EBADF;
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 1c39efb71ba..2487116d0d3 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
&hole_size, &rec, &is_last);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
if (rec.e_blkno == 0ULL) {
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 66c53b642a8..6c2d136561c 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
next_pos = deh_offset(deh) + 1;
if (item_moved(&tmp_ih, &path_to_entry)) {
+ set_cpu_key_k_offset(&pos_key,
+ next_pos);
goto research;
}
} /* for */
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ea5061fd4f3..c3a9de6eadc 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1810,11 +1810,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
- if (insert_inode_locked4(inode, args.objectid,
- reiserfs_find_actor, &args) < 0) {
+
+ reiserfs_write_unlock(inode->i_sb);
+ err = insert_inode_locked4(inode, args.objectid,
+ reiserfs_find_actor, &args);
+ reiserfs_write_lock(inode->i_sb);
+ if (err) {
err = -EINVAL;
goto out_bad_inode;
}
+
if (old_format_only(sb))
/* not a perfect generation count, as object ids can be reused, but
** this is as good as reiserfs can do right now.
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 4cce1d9552f..821bcf70e46 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
static int chown_one_xattr(struct dentry *dentry, void *data)
{
struct iattr *attrs = data;
- return reiserfs_setattr(dentry, attrs);
+ int ia_valid = attrs->ia_valid;
+ int err;
+
+ /*
+ * We only want the ownership bits. Otherwise, we'll do
+ * things like change a directory to a regular file if
+ * ATTR_MODE is set.
+ */
+ attrs->ia_valid &= (ATTR_UID|ATTR_GID);
+ err = reiserfs_setattr(dentry, attrs);
+ attrs->ia_valid = ia_valid;
+
+ return err;
}
/* No i_mutex, but the inode is unconnected. */
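chown_one_xattr() above uses a save-mask-restore idiom: ia_valid is narrowed to the ownership bits for the duration of one reiserfs_setattr() call and then restored, because the same iattr is handed to this callback once per xattr file. A self-contained sketch of the idiom (toy names, not the VFS types):

#include <stdio.h>

#define ATTR_MODE (1 << 0)
#define ATTR_UID  (1 << 1)
#define ATTR_GID  (1 << 2)

struct toy_iattr { int ia_valid; };

/* stand-in for reiserfs_setattr(): just reports which bits it saw */
static int toy_setattr(struct toy_iattr *attrs)
{
	printf("setattr sees 0x%x\n", attrs->ia_valid);
	return 0;
}

static int chown_one(struct toy_iattr *attrs)
{
	int ia_valid = attrs->ia_valid;			/* save */
	int err;

	attrs->ia_valid &= (ATTR_UID | ATTR_GID);	/* ownership only */
	err = toy_setattr(attrs);			/* sees 0x2 */
	attrs->ia_valid = ia_valid;			/* restore */
	return err;
}

int main(void)
{
	struct toy_iattr attrs = { ATTR_MODE | ATTR_UID };

	chown_one(&attrs);
	printf("restored to 0x%x\n", attrs.ia_valid);	/* 0x3 */
	return 0;
}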
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index d7c01ef64ed..6c8767fdfc6 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -443,6 +443,9 @@ int reiserfs_acl_chmod(struct inode *inode)
int depth;
int error;
+ if (IS_PRIVATE(inode))
+ return 0;
+
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index e14512678c9..6f31590dd12 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1012,6 +1012,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
enum kobj_ns_type type;
const void *ns;
ino_t ino;
+ loff_t off;
type = sysfs_ns_type(parent_sd);
ns = sysfs_info(dentry->d_sb)->ns[type];
@@ -1034,6 +1035,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
return 0;
}
mutex_lock(&sysfs_mutex);
+ off = filp->f_pos;
for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
pos;
pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
@@ -1045,19 +1047,24 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
len = strlen(name);
ino = pos->s_ino;
type = dt_type(pos);
- filp->f_pos = pos->s_hash;
+ off = filp->f_pos = pos->s_hash;
filp->private_data = sysfs_get(pos);
mutex_unlock(&sysfs_mutex);
- ret = filldir(dirent, name, len, filp->f_pos, ino, type);
+ ret = filldir(dirent, name, len, off, ino, type);
mutex_lock(&sysfs_mutex);
if (ret < 0)
break;
}
mutex_unlock(&sysfs_mutex);
- if ((filp->f_pos > 1) && !pos) { /* EOF */
- filp->f_pos = INT_MAX;
+
+ /* don't reference last entry if its refcount is dropped */
+ if (!pos) {
filp->private_data = NULL;
+
+ /* EOF, and f_pos was not reset to 0 or 1 by the read/write path */
+ if (off == filp->f_pos && off > 1)
+ filp->f_pos = INT_MAX;
}
return 0;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index d82efaa2ac7..ca9ecaa8111 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -455,6 +455,28 @@ xfs_vn_getattr(
return 0;
}
+static void
+xfs_setattr_mode(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct iattr *iattr)
+{
+ struct inode *inode = VFS_I(ip);
+ umode_t mode = iattr->ia_mode;
+
+ ASSERT(tp);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+
+ ip->i_d.di_mode &= S_IFMT;
+ ip->i_d.di_mode |= mode & ~S_IFMT;
+
+ inode->i_mode &= S_IFMT;
+ inode->i_mode |= mode & ~S_IFMT;
+}
+
int
xfs_setattr_nonsize(
struct xfs_inode *ip,
@@ -606,18 +628,8 @@ xfs_setattr_nonsize(
/*
* Change file access modes.
*/
- if (mask & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
-
- ip->i_d.di_mode &= S_IFMT;
- ip->i_d.di_mode |= mode & ~S_IFMT;
-
- inode->i_mode &= S_IFMT;
- inode->i_mode |= mode & ~S_IFMT;
- }
+ if (mask & ATTR_MODE)
+ xfs_setattr_mode(tp, ip, iattr);
/*
* Change file access or modified times.
@@ -714,9 +726,8 @@ xfs_setattr_size(
return XFS_ERROR(error);
ASSERT(S_ISREG(ip->i_d.di_mode));
- ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
- ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
if (!(flags & XFS_ATTR_NOLOCK)) {
lock_flags |= XFS_IOLOCK_EXCL;
@@ -860,6 +871,12 @@ xfs_setattr_size(
xfs_inode_clear_eofblocks_tag(ip);
}
+ /*
+ * Change file access modes.
+ */
+ if (mask & ATTR_MODE)
+ xfs_setattr_mode(tp, ip, iattr);
+
if (mask & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 22ba56e834e..fc93bd3d9a8 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -352,7 +352,6 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
-#ifdef CONFIG_PM
int acpi_bus_set_power(acpi_handle handle, int state);
const char *acpi_power_state_string(int state);
int acpi_device_get_power(struct acpi_device *device, int *state);
@@ -360,41 +359,12 @@ int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
+
+#ifdef CONFIG_PM
bool acpi_bus_can_wakeup(acpi_handle handle);
-#else /* !CONFIG_PM */
-static inline int acpi_bus_set_power(acpi_handle handle, int state)
-{
- return 0;
-}
-static inline const char *acpi_power_state_string(int state)
-{
- return "D0";
-}
-static inline int acpi_device_get_power(struct acpi_device *device, int *state)
-{
- return 0;
-}
-static inline int acpi_device_set_power(struct acpi_device *device, int state)
-{
- return 0;
-}
-static inline int acpi_bus_init_power(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
- return 0;
-}
-static inline bool acpi_bus_power_manageable(acpi_handle handle)
-{
- return false;
-}
-static inline bool acpi_bus_can_wakeup(acpi_handle handle)
-{
- return false;
-}
-#endif /* !CONFIG_PM */
+#else
+static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
+#endif
#ifdef CONFIG_ACPI_PROC_EVENT
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1..a59ff51b016 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2d94d7413d7..f1ce786736e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1593,9 +1593,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 918e8fe2f5e..c2af598f701 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -240,6 +240,7 @@
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -247,11 +248,13 @@
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -603,6 +606,8 @@
{0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
new file mode 100644
index 00000000000..86ae587817a
--- /dev/null
+++ b/include/linux/arm-cci.h
@@ -0,0 +1,30 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012-2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_ARM_CCI_H
+#define __LINUX_ARM_CCI_H
+
+#ifdef CONFIG_ARM_CCI
+extern void disable_cci(int cluster);
+#else
+static inline void disable_cci(int cluster) { }
+#endif
+
+#endif
diff --git a/include/linux/arm-hdlcd.h b/include/linux/arm-hdlcd.h
new file mode 100644
index 00000000000..939f3a81d56
--- /dev/null
+++ b/include/linux/arm-hdlcd.h
@@ -0,0 +1,122 @@
+/*
+ * include/linux/arm-hdlcd.h
+ *
+ * Copyright (C) 2011 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * ARM HDLCD Controller register definition
+ */
+
+#include <linux/fb.h>
+#include <linux/completion.h>
+
+/* register offsets */
+#define HDLCD_REG_VERSION 0x0000 /* ro */
+#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */
+#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */
+#define HDLCD_REG_INT_MASK 0x0018 /* rw */
+#define HDLCD_REG_INT_STATUS 0x001c /* ro */
+#define HDLCD_REG_USER_OUT 0x0020 /* rw */
+#define HDLCD_REG_FB_BASE 0x0100 /* rw */
+#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */
+#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */
+#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */
+#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */
+#define HDLCD_REG_V_SYNC 0x0200 /* rw */
+#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */
+#define HDLCD_REG_V_DATA 0x0208 /* rw */
+#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */
+#define HDLCD_REG_H_SYNC 0x0210 /* rw */
+#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */
+#define HDLCD_REG_H_DATA 0x0218 /* rw */
+#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */
+#define HDLCD_REG_POLARITIES 0x0220 /* rw */
+#define HDLCD_REG_COMMAND 0x0230 /* rw */
+#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */
+#define HDLCD_REG_BLUE_SELECT 0x0244 /* rw */
+#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */
+#define HDLCD_REG_RED_SELECT 0x024c /* rw */
+
+/* version */
+#define HDLCD_PRODUCT_ID 0x1CDC0000
+#define HDLCD_PRODUCT_MASK 0xFFFF0000
+#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00
+#define HDLCD_VERSION_MINOR_MASK 0x000000FF
+
+/* interrupts */
+#define HDLCD_INTERRUPT_DMA_END (1 << 0)
+#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1)
+#define HDLCD_INTERRUPT_VSYNC (1 << 2)
+#define HDLCD_INTERRUPT_UNDERRUN (1 << 3)
+
+/* polarity */
+#define HDLCD_POLARITY_VSYNC (1 << 0)
+#define HDLCD_POLARITY_HSYNC (1 << 1)
+#define HDLCD_POLARITY_DATAEN (1 << 2)
+#define HDLCD_POLARITY_DATA (1 << 3)
+#define HDLCD_POLARITY_PIXELCLK (1 << 4)
+
+/* commands */
+#define HDLCD_COMMAND_DISABLE (0 << 0)
+#define HDLCD_COMMAND_ENABLE (1 << 0)
+
+/* pixel format */
+#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31)
+#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31)
+#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3)
+
+/* bus options */
+#define HDLCD_BUS_BURST_MASK 0x01f
+#define HDLCD_BUS_MAX_OUTSTAND 0xf00
+#define HDLCD_BUS_BURST_NONE (0 << 0)
+#define HDLCD_BUS_BURST_1 (1 << 0)
+#define HDLCD_BUS_BURST_2 (1 << 1)
+#define HDLCD_BUS_BURST_4 (1 << 2)
+#define HDLCD_BUS_BURST_8 (1 << 3)
+#define HDLCD_BUS_BURST_16 (1 << 4)
+
+/* The maximum supported resolution is 4096x4096 with 8 bits per color
+ component plus 8 bits of alpha, but we choose the usual hardware
+ default (2048x2048, 32 bpp) and enable double buffering */
+#define HDLCD_MAX_XRES 2048
+#define HDLCD_MAX_YRES 2048
+#define HDLCD_MAX_FRAMEBUFFER_SIZE (HDLCD_MAX_XRES * HDLCD_MAX_YRES << 2)
+
+#define HDLCD_MEM_BASE (CONFIG_PAGE_OFFSET - 0x1000000)
+
+#define NR_PALETTE 256
+
+/* OEMs using HDLCD may wish to enable these settings if
+ * display disruption is apparent and HDLCD access to RAM
+ * is suspected of being starved.
+ */
+/* Turn the HDLCD default colour red instead of black so
+ * that pixel clock data underruns are easy to tell apart
+ * from other kinds of visual disruption.
+ */
+//#define HDLCD_RED_DEFAULT_COLOUR
+/* Add a counter in the IRQ handler to count buffer underruns
+ * and /proc/hdlcd_underrun to read the counter
+ */
+//#define HDLCD_COUNT_BUFFERUNDERRUNS
+/* Restrict height to 1x screen size */
+//#define HDLCD_NO_VIRTUAL_SCREEN
+
+#ifdef CONFIG_ANDROID
+#define HDLCD_NO_VIRTUAL_SCREEN
+#endif
+
+struct hdlcd_device {
+ struct fb_info fb;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ int irq;
+ struct completion vsync_completion;
+ unsigned char *edid;
+};
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 5a6d718adf3..b4086cf9b7e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -120,7 +120,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{
- if (unlikely(!audit_dummy_context()))
+ if (unlikely(current->audit_context))
__audit_syscall_entry(arch, major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
@@ -390,6 +390,11 @@ static inline void audit_ptrace(struct task_struct *t)
#define audit_signals 0
#endif /* CONFIG_AUDITSYSCALL */
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+ return uid_valid(audit_get_loginuid(tsk));
+}
+
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78feda9bbae..33f358f88b2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -838,7 +838,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
unsigned int cmd_flags)
{
if (unlikely(cmd_flags & REQ_DISCARD))
- return q->limits.max_discard_sectors;
+ return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
if (unlikely(cmd_flags & REQ_WRITE_SAME))
return q->limits.max_write_same_sectors;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 92dc8af4bfc..48aa62525d8 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -304,9 +304,6 @@ struct cftype {
/* CFTYPE_* flags */
unsigned int flags;
- /* file xattrs */
- struct simple_xattrs xattrs;
-
int (*open)(struct inode *inode, struct file *file);
ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
struct file *file,
@@ -574,7 +571,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
*
* If a subsystem synchronizes against the parent in its ->css_online() and
* before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_offline() is
+ * iteration, any descendant cgroup which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* In other words, the following guarantees that a descendant can't escape
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 16e4e9a643f..df1ff7c9585 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -185,8 +185,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
- size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
struct user_struct **user, int creat_flags,
int page_size_log);
@@ -205,8 +204,8 @@ static inline int is_file_hugepages(struct file *file)
#define is_file_hugepages(file) 0
static inline struct file *
-hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
- vm_flags_t acctflag, struct user_struct **user, int creat_flags,
+hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
+ struct user_struct **user, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -284,6 +283,13 @@ static inline struct hstate *hstate_file(struct file *f)
return hstate_inode(file_inode(f));
}
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ if (!page_size_log)
+ return &default_hstate;
+ return size_to_hstate(1 << page_size_log);
+}
+
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
return hstate_file(vma->vm_file);
@@ -348,11 +354,12 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-#else
+#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
+#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
@@ -367,6 +374,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index ae221a7b509..c4d870b0d5e 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -43,8 +43,8 @@ struct ipc_namespace {
size_t shm_ctlmax;
size_t shm_ctlall;
+ unsigned long shm_tot;
int shm_ctlmni;
- int shm_tot;
/*
* Defines whether IPC_RMID is forced for _all_ shm segments regardless
* of shmctl()
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 3fd8e4290a1..40643ca79cd 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -31,6 +31,8 @@
#define GIC_DIST_TARGET 0x800
#define GIC_DIST_CONFIG 0xc00
#define GIC_DIST_SOFTINT 0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
+#define GIC_DIST_SGI_PENDING_SET 0xf20
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -65,15 +67,21 @@ extern struct irq_chip gic_arch_extn;
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
-void gic_secondary_init(unsigned int);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_cpu_if_down(void);
+
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
{
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
#endif /* __ASSEMBLY */
#endif
diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h
new file mode 100644
index 00000000000..adf4c30f3af
--- /dev/null
+++ b/include/linux/irqchip/chained_irq.h
@@ -0,0 +1,52 @@
+/*
+ * Chained IRQ handlers support.
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __IRQCHIP_CHAINED_IRQ_H
+#define __IRQCHIP_CHAINED_IRQ_H
+
+#include <linux/irq.h>
+
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ /* FastEOI controllers require no action on entry. */
+ if (chip->irq_eoi)
+ return;
+
+ if (chip->irq_mask_ack) {
+ chip->irq_mask_ack(&desc->irq_data);
+ } else {
+ chip->irq_mask(&desc->irq_data);
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ }
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ else
+ chip->irq_unmask(&desc->irq_data);
+}
+
+#endif /* __IRQCHIP_CHAINED_IRQ_H */
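A typical consumer of chained_irq_enter()/chained_irq_exit() is the flow handler of a cascaded secondary interrupt controller. The sketch below is illustrative only — my_combiner, MY_PENDING_REG and the demux logic are hypothetical — but the bracketing is the intended usage and works whether the parent chip is fasteoi or mask/ack:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

#define MY_PENDING_REG	0x10	/* hypothetical pending-status register */

struct my_combiner {		/* hypothetical driver state */
	void __iomem *base;
	unsigned int irq_base;
};

static void my_combiner_handle_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct my_combiner *cb = irq_desc_get_handler_data(desc);
	unsigned long pending;

	chained_irq_enter(chip, desc);	/* mask/ack unless parent is fasteoi */

	pending = readl(cb->base + MY_PENDING_REG);
	while (pending) {
		int bit = __ffs(pending);

		generic_handle_irq(cb->irq_base + bit);
		pending &= ~BIT(bit);
	}

	chained_irq_exit(chip, desc);	/* eoi or unmask the parent */
}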
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 50e5a5e6a71..f9fe88957b7 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -480,6 +480,7 @@ struct transaction_s
T_COMMIT,
T_COMMIT_DFLUSH,
T_COMMIT_JFLUSH,
+ T_COMMIT_CALLBACK,
T_FINISHED
} t_state;
@@ -1200,6 +1201,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_journal_force_commit_nested(journal_t *journal);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 4972e6e9ca9..7419c02085d 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -95,6 +96,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put() with one exception: if the reference
+ * count drops to zero, the lock is taken atomically with respect to the
+ * final decrement. The release function must then call spin_unlock()
+ * without _irqrestore; the caller restores the saved IRQ flags.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+ void (*release)(struct kref *kref),
+ spinlock_t *lock)
+{
+ unsigned long flags;
+
+ WARN_ON(release == NULL);
+ if (atomic_add_unless(&kref->refcount, -1, 1))
+ return 0;
+ spin_lock_irqsave(lock, flags);
+ if (atomic_dec_and_test(&kref->refcount)) {
+ release(kref);
+ local_irq_restore(flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(lock, flags);
+ return 0;
+}
+
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
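The contract of kref_put_spinlock_irqsave() is subtle: on the final put, @release runs with @lock held and local interrupts disabled, and it must end with a bare spin_unlock() because the caller restores the saved flags itself. A hedged usage sketch with a hypothetical object type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* hypothetical refcounted object living on a lock-protected list */
struct my_obj {
	struct kref kref;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_list_lock);

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	list_del(&obj->node);
	/* drop the lock with a bare spin_unlock(): the caller still
	 * holds the saved IRQ flags and will local_irq_restore() them */
	spin_unlock(&my_list_lock);
	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put_spinlock_irqsave(&obj->kref, my_obj_release, &my_list_lock);
}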
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d7ac844e997..cd0ab0ba798 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -986,6 +986,12 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
+#ifdef CONFIG_SCHED_HMP
+struct hmp_domain {
+ struct cpumask cpus;
+ struct list_head hmp_domains;
+};
+#endif /* CONFIG_SCHED_HMP */
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -1098,6 +1104,12 @@ struct sched_avg {
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
+ unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+ u64 hmp_last_up_migration;
+ u64 hmp_last_down_migration;
+#endif
+ u32 usage_avg_sum;
};
#ifdef CONFIG_SCHEDSTATS
@@ -2416,27 +2428,18 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
*
* Lock the threadgroup @tsk belongs to. No new task is allowed to enter
* and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * perform exec. This is useful for cases where the threadgroup needs to
- * stay stable across blockable operations.
+ * change ->group_leader/pid. This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
*
* fork and exit paths explicitly call threadgroup_change_{begin|end}() for
* synchronization. While held, no new task will be added to threadgroup
* and no existing live task will have its PF_EXITING set.
*
- * During exec, a task goes and puts its thread group through unusual
- * changes. After de-threading, exclusive access is assumed to resources
- * which are usually shared by tasks in the same group - e.g. sighand may
- * be replaced with a new one. Also, the exec'ing task takes over group
- * leader role including its pid. Exclude these changes while locked by
- * grabbing cred_guard_mutex which is used to synchronize exec path.
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
*/
static inline void threadgroup_lock(struct task_struct *tsk)
{
- /*
- * exec uses exit for de-threading nesting group_rwsem inside
- * cred_guard_mutex. Grab cred_guard_mutex first.
- */
- mutex_lock(&tsk->signal->cred_guard_mutex);
down_write(&tsk->signal->group_rwsem);
}
@@ -2449,7 +2452,6 @@ static inline void threadgroup_lock(struct task_struct *tsk)
static inline void threadgroup_unlock(struct task_struct *tsk)
{
up_write(&tsk->signal->group_rwsem);
- mutex_unlock(&tsk->signal->cred_guard_mutex);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/time.h b/include/linux/time.h
index d4835dfdf25..afcdc4bb93a 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
extern bool persistent_clock_exist;
-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
-#define has_persistent_clock() true
-#else
static inline bool has_persistent_clock(void)
{
return persistent_clock_exist;
}
-#endif
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1819b59aab2..73afb19ff92 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -267,6 +267,8 @@ struct usb_serial_driver {
struct usb_serial_port *port, struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
int (*chars_in_buffer)(struct tty_struct *tty);
+ void (*wait_until_sent)(struct tty_struct *tty, long timeout);
+ bool (*tx_empty)(struct usb_serial_port *port);
void (*throttle)(struct tty_struct *tty);
void (*unthrottle)(struct tty_struct *tty);
int (*tiocmget)(struct tty_struct *tty);
@@ -325,6 +327,8 @@ extern void usb_serial_generic_close(struct usb_serial_port *port);
extern int usb_serial_generic_resume(struct usb_serial *serial);
extern int usb_serial_generic_write_room(struct tty_struct *tty);
extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
+ long timeout);
extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
extern void usb_serial_generic_throttle(struct tty_struct *tty);
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index 75818744ab5..81fa998e7ad 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -15,6 +15,7 @@
#define _LINUX_VEXPRESS_H
#include <linux/device.h>
+#include <linux/err.h>
#define VEXPRESS_SITE_MB 0
#define VEXPRESS_SITE_DB1 1
@@ -123,7 +124,122 @@ void vexpress_restart(char str, const char *cmd);
struct clk *vexpress_osc_setup(struct device *dev);
void vexpress_osc_of_setup(struct device_node *node);
+struct clk *vexpress_clk_register_spc(const char *name, int cluster_id);
+void vexpress_clk_of_register_spc(void);
+
void vexpress_clk_init(void __iomem *sp810_base);
void vexpress_clk_of_init(void);
+/* SPC */
+
+#define VEXPRESS_SPC_WAKE_INTR_IRQ(cluster, cpu) \
+ (1 << (4 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_FIQ(cluster, cpu) \
+ (1 << (7 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_SWDOG (1 << 10)
+#define VEXPRESS_SPC_WAKE_INTR_GTIMER (1 << 11)
+#define VEXPRESS_SPC_WAKE_INTR_MASK 0xFFF
+
+#ifdef CONFIG_ARM_SPC
+
+extern u32 vexpress_spc_get_clusterid(int cpu_part_no);
+extern u32 vexpress_spc_read_rsthold_reg(int cluster);
+extern u32 vexpress_spc_read_rststat_reg(int cluster);
+extern u32 vexpress_scc_read_rststat(int cluster);
+extern u32 vexpress_spc_get_wake_intr(int raw);
+extern int vexpress_spc_standbywfi_status(int cluster, int cpu);
+extern int vexpress_spc_standbywfil2_status(int cluster);
+extern int vexpress_spc_set_cpu_wakeup_irq(u32 cpu, u32 cluster, u32 set);
+extern int vexpress_spc_set_global_wakeup_intr(u32 set);
+extern unsigned int *vexpress_spc_get_freq_table(uint32_t cluster, int *count);
+extern int vexpress_spc_get_performance(int cluster, u32 *freq);
+extern int vexpress_spc_set_performance(int cluster, u32 freq);
+extern int vexpress_spc_wfi_cpustat(int cluster);
+extern void vexpress_spc_set_wake_intr(u32 mask);
+extern void vexpress_spc_write_bxaddr_reg(int cluster, int cpu, u32 val);
+extern int vexpress_spc_get_nb_cpus(int cluster);
+extern void vexpress_spc_write_rsthold_reg(int cluster, u32 value);
+extern void vexpress_spc_powerdown_enable(int cluster, int enable);
+extern void vexpress_spc_adb400_pd_enable(int cluster, int enable);
+extern void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable);
+extern void vexpress_spc_wfi_cluster_reset(int cluster, int enable);
+extern void vexpress_scc_ctl_snoops(int cluster, int enable);
+extern bool vexpress_spc_check_loaded(void);
+#else
+static inline int vexpress_spc_set_cpu_wakeup_irq(u32 cpu, u32 cluster, u32 set)
+{
+ return 0;
+}
+
+static inline int vexpress_spc_set_global_wakeup_intr(u32 set)
+{
+ return 0;
+}
+
+static inline int vexpress_spc_standbywfi_status(int cluster, int cpu)
+{
+ return 0;
+}
+
+static inline int vexpress_spc_standbywfil2_status(int cluster)
+{
+ return 0;
+}
+
+static inline u32 vexpress_spc_get_clusterid(int cpu_part_no)
+{
+ return 0;
+}
+
+static inline u32 vexpress_spc_read_rsthold_reg(int cluster)
+{
+ return 0;
+}
+
+static inline u32 vexpress_spc_read_rststat_reg(int cluster)
+{
+ return 0;
+}
+
+static inline void vexpress_spc_write_bxaddr_reg(int cluster, int cpu, u32 val)
+{
+}
+
+static inline void vexpress_spc_write_rsthold_reg(int cluster, u32 value)
+{
+}
+
+static inline u32 vexpress_scc_read_rststat(int cluster)
+{
+ return 0;
+}
+
+static inline unsigned int *vexpress_spc_get_freq_table(uint32_t cluster, int *count)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int vexpress_spc_get_performance(int cluster, u32 *freq)
+{
+ return -ENOSYS;
+}
+static inline int vexpress_spc_set_performance(int cluster, u32 freq)
+{
+ return -ENOSYS;
+}
+static inline void vexpress_spc_set_wake_intr(u32 mask) { }
+static inline u32 vexpress_spc_get_wake_intr(int raw) { return 0; }
+static inline void vexpress_spc_powerdown_enable(int cluster, int enable) { }
+static inline void vexpress_spc_adb400_pd_enable(int cluster, int enable) { }
+static inline void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{ }
+static inline int vexpress_spc_wfi_cpustat(int cluster) { return 0; }
+static inline void vexpress_spc_wfi_cluster_reset(int cluster, int enable) { }
+static inline bool vexpress_spc_check_loaded(void)
+{
+ return false;
+}
+static inline void vexpress_scc_ctl_snoops(int cluster, int enable) { }
+#endif
+
#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 7cb64d4b499..30194a628fd 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -217,6 +217,8 @@ do { \
if (!ret) \
break; \
} \
+ if (!ret && (condition)) \
+ ret = 1; \
finish_wait(&wq, &__wait); \
} while (0)
@@ -233,8 +235,9 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
+ * The function returns 0 if the @timeout elapsed, or the remaining
+ * jiffies (at least 1) if the @condition evaluated to %true before
+ * the @timeout elapsed.
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
@@ -302,6 +305,8 @@ do { \
ret = -ERESTARTSYS; \
break; \
} \
+ if (!ret && (condition)) \
+ ret = 1; \
finish_wait(&wq, &__wait); \
} while (0)
@@ -318,9 +323,10 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
+ * Returns:
+ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
+ * a signal, or the remaining jiffies (at least 1) if the @condition
+ * evaluated to %true before the @timeout elapsed.
*/
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
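With the at-least-1 guarantee above, the three outcomes of wait_event_interruptible_timeout() can be told apart without ambiguity. A usage sketch (my_wq, my_flag and the 100 ms timeout are hypothetical):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_flag;	/* set elsewhere before wake_up(&my_wq) */

static int wait_for_flag(void)
{
	long ret = wait_event_interruptible_timeout(my_wq, my_flag,
						    msecs_to_jiffies(100));
	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed, condition false */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: hit by a signal */
	return 0;			/* ret >= 1: jiffies left over */
}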
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 0a1dcc2fa2f..ab3d0ac3a2e 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -135,14 +135,15 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
spin_lock(&q->net->lru_lock);
- list_move_tail(&q->lru_list, &q->net->lru_list);
+ if (!list_empty(&q->lru_list))
+ list_move_tail(&q->lru_list, &q->net->lru_list);
spin_unlock(&q->net->lru_lock);
}
static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
spin_lock(&q->net->lru_lock);
- list_del(&q->lru_list);
+ list_del_init(&q->lru_list);
spin_unlock(&q->net->lru_lock);
}
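The switch to list_del_init() is what makes the new list_empty() test safe: plain list_del() leaves the node's pointers poisoned, while list_del_init() re-points them at the node itself, so a later list_empty() reliably reports whether the item is still queued. In sketch form:

#include <linux/list.h>

struct item {
	struct list_head lru_list;
};

static void remove_item(struct item *it)
{
	/* unlink, but leave the node self-pointing so that a later
	 * list_empty(&it->lru_list) is well defined */
	list_del_init(&it->lru_list);
}

static void move_if_listed(struct item *it, struct list_head *head)
{
	/* skip items that remove_item() already took off the list */
	if (!list_empty(&it->lru_list))
		list_move_tail(&it->lru_list, head);
}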
diff --git a/include/net/sock.h b/include/net/sock.h
index 14f6e9d19dc..0be480a43e0 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -865,6 +865,18 @@ struct inet_hashinfo;
struct raw_hashinfo;
struct module;
+/*
+ * Caches using SLAB_DESTROY_BY_RCU must leave the .next pointer of nulls
+ * nodes unmodified, so special care is taken when initializing the object
+ * to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+ if (offsetof(struct sock, sk_node.next) != 0)
+ memset(sk, 0, offsetof(struct sock, sk_node.next));
+ memset(&sk->sk_node.pprev, 0,
+ size - offsetof(struct sock, sk_node.pprev));
+}
+
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
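sk_prot_clear_nulls() above zeroes the socket in two memset() calls split around sk_node.next, leaving that one field intact because a SLAB_DESTROY_BY_RCU reader may still be chasing it. A userspace model of the split-zeroing pattern (toy layout, not struct sock):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct toy_sock {
	int a;
	void *next;	/* must survive the clear: readers may chase it */
	void *pprev;
	int b;
};

static void clear_but_keep_next(struct toy_sock *sk)
{
	/* zero everything before .next ... */
	if (offsetof(struct toy_sock, next) != 0)
		memset(sk, 0, offsetof(struct toy_sock, next));
	/* ... and everything from .pprev onwards, skipping .next */
	memset(&sk->pprev, 0,
	       sizeof(*sk) - offsetof(struct toy_sock, pprev));
}

int main(void)
{
	struct toy_sock sk = { 1, (void *)0xdead, (void *)0xbeef, 2 };

	clear_but_keep_next(&sk);
	printf("a=%d next=%p b=%d\n", sk.a, sk.next, sk.b);
	return 0;
}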
diff --git a/include/net/tcp.h b/include/net/tcp.h
index efa63af9050..13efb8c0bcd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1049,6 +1049,7 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
skb_queue_len(&tp->ucopy.prequeue) == 0)
return false;
+ skb_dst_force(skb);
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
if (tp->ucopy.memory > sk->sk_rcvbuf) {
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index f841ba4bacb..dfb42ca6d04 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1787,6 +1787,7 @@ struct snd_emu10k1 {
unsigned int next_free_voice;
const struct firmware *firmware;
+ const struct firmware *dock_fw;
#ifdef CONFIG_PM_SLEEP
unsigned int *saved_ptr;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c4af592f705..f8640f314d3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -544,6 +544,7 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
+ struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
struct kref sess_kref;
};
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644
index 00000000000..f76dd4de625
--- /dev/null
+++ b/include/trace/events/power_cpu_migrate.h
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto \
+ TP_PROTO(u64 timestamp, \
+ u32 cpu_hwid)
+#define __cpu_migrate_args \
+ TP_ARGS(timestamp, \
+ cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+ __cpu_migrate_proto,
+ __cpu_migrate_args,
+
+ TP_STRUCT__entry(
+ __field(u64, timestamp )
+ __field(u32, cpu_hwid )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->cpu_hwid = cpu_hwid;
+ ),
+
+ TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+ (unsigned long long)__entry->timestamp,
+ (unsigned long)__entry->cpu_hwid
+ )
+);
+
+#define __define_cpu_migrate_event(name) \
+ DEFINE_EVENT(cpu_migrate, cpu_migrate_##name, \
+ __cpu_migrate_proto, \
+ __cpu_migrate_args \
+ )
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times; see TRACE_HEADER_MULTI_READ at the top */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration:
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67..203e8e9933b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -430,6 +430,159 @@ TRACE_EVENT(sched_pi_setprio,
__entry->oldprio, __entry->newprio)
);
+/*
+ * Tracepoint for showing tracked load contribution.
+ */
+TRACE_EVENT(sched_task_load_contrib,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
+
+ TP_ARGS(tsk, load_contrib),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, load_contrib)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->load_contrib = load_contrib;
+ ),
+
+ TP_printk("comm=%s pid=%d load_contrib=%lu",
+ __entry->comm, __entry->pid,
+ __entry->load_contrib)
+);
+
+/*
+ * Tracepoint for showing tracked task runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_runnable_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_rq_runnable_ratio,
+
+ TP_PROTO(int cpu, unsigned long ratio),
+
+ TP_ARGS(cpu, ratio),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("cpu=%d ratio=%lu",
+ __entry->cpu,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable load.
+ */
+TRACE_EVENT(sched_rq_runnable_load,
+
+ TP_PROTO(int cpu, u64 load),
+
+ TP_ARGS(cpu, load),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(u64, load)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->load = load;
+ ),
+
+ TP_printk("cpu=%d load=%llu",
+ __entry->cpu,
+ __entry->load)
+);
+
+/*
+ * Tracepoint for showing tracked task cpu usage ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_usage_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ */
+TRACE_EVENT(sched_hmp_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+ TP_ARGS(tsk, dest, force),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(int, dest)
+ __field(int, force)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->dest = dest;
+ __entry->force = force;
+ ),
+
+ TP_printk("comm=%s pid=%d dest=%d force=%d",
+ __entry->comm, __entry->pid,
+ __entry->dest, __entry->force)
+);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
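Once built in, these events show up under the usual tracefs hierarchy; for example, the HMP migration event can typically be enabled through the events/sched/sched_hmp_migrate/enable node under /sys/kernel/debug/tracing/.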
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 9f096f1c090..9554a19d341 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -246,6 +246,7 @@
#define AUDIT_OBJ_TYPE 21
#define AUDIT_OBJ_LEV_LOW 22
#define AUDIT_OBJ_LEV_HIGH 23
+#define AUDIT_LOGINUID_SET 24
/* These are ONLY useful when checking
* at syscall exit time (AUDIT_AT_EXIT). */
diff --git a/include/uapi/linux/if_cablemodem.h b/include/uapi/linux/if_cablemodem.h
index 9ca1007edd9..ee6b3c442ba 100644
--- a/include/uapi/linux/if_cablemodem.h
+++ b/include/uapi/linux/if_cablemodem.h
@@ -12,11 +12,11 @@
*/
/* some useful defines for sb1000.c e cmconfig.c - fv */
-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
+#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
+#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
+#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
+#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
+#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
+#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
#endif
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index ee13ab6c361..c312f16bc4e 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -39,7 +39,7 @@
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
+#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
struct virtio_console_config {
/* columns of the screens */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index a5a8c88753b..c520203fac2 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -191,7 +191,7 @@ struct virtio_net_ctrl_mac {
* specified.
*/
struct virtio_net_ctrl_mq {
- u16 virtqueue_pairs;
+ __u16 virtqueue_pairs;
};
#define VIRTIO_NET_CTRL_MQ 4
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 9dfc1200098..3ef3fe05ee9 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -13,6 +13,24 @@
#include <xen/interface/grant_table.h>
/*
+ * Older implementations of the Xen network frontend / backend have an
+ * implicit dependency on MAX_SKB_FRAGS as the maximum number of ring
+ * slots an skb can use. Netfront / netback may not work as expected
+ * when the frontend and backend have different MAX_SKB_FRAGS.
+ *
+ * A better approach is to add a mechanism for netfront / netback to
+ * negotiate this value. However, we cannot fix all possible frontends,
+ * so we need to define a value which states the minimum number of slots
+ * a backend must support.
+ *
+ * The minimum value derives from the older Linux kernel's MAX_SKB_FRAGS
+ * (18), which has proved to work with most frontends. Any new backend
+ * which doesn't negotiate with the frontend should expect the frontend
+ * to send a valid packet using slots up to this value.
+ */
+#define XEN_NETIF_NR_SLOTS_MIN 18
+
+/*
* Notifications after enqueuing any type of message should be conditional on
* the appropriate req_event or rsp_event field in the shared ring.
* If the client sends notification for rx requests then it should specify
@@ -47,6 +65,7 @@
#define _XEN_NETTXF_extra_info (3)
#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct xen_netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */
diff --git a/ipc/shm.c b/ipc/shm.c
index cb858df061d..7e199fa1960 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -462,7 +462,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
size_t size = params->u.size;
int error;
struct shmid_kernel *shp;
- int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file * file;
char name[13];
int id;
@@ -491,10 +491,20 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
sprintf (name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
+ struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
+ & SHM_HUGE_MASK);
+ size_t hugesize;
+
+ if (!hs) {
+ error = -EINVAL;
+ goto no_file;
+ }
+ hugesize = ALIGN(size, huge_page_size(hs));
+
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
- file = hugetlb_file_setup(name, 0, size, acctflag,
+ file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
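The newseg() change rounds the requested size up to a whole number of huge pages before setting up the file, since hugetlbfs can only back whole huge pages. ALIGN() is the usual power-of-two round-up; a quick standalone illustration, assuming 2 MiB huge pages:

#include <stdio.h>

/* equivalent to the kernel's ALIGN() for a power-of-two alignment */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long huge = 2UL << 20;			/* 2 MiB */

	printf("%lu\n", ALIGN(4096UL, huge));		/* 2097152: 1 page */
	printf("%lu\n", ALIGN(huge + 1, huge));		/* 4194304: 2 pages */
	return 0;
}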
diff --git a/kernel/Makefile b/kernel/Makefile
index bbde5f1a448..5a51e6c7128 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -175,7 +175,7 @@ signing_key.priv signing_key.x509: x509.genkey
openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
-batch -x509 -config x509.genkey \
-outform DER -out signing_key.x509 \
- -keyout signing_key.priv
+ -keyout signing_key.priv 2>&1
@echo "###"
@echo "### Key pair generated."
@echo "###"
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 642a89c4f3d..a291aa23fb3 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -617,9 +617,9 @@ void audit_trim_trees(void)
}
spin_unlock(&hash_lock);
trim_marked(tree);
- put_tree(tree);
drop_collected_mounts(root_mnt);
skip_it:
+ put_tree(tree);
mutex_lock(&audit_filter_mutex);
}
list_del(&cursor);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index f9fc54bbe06..2bf508dfec1 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -345,6 +345,12 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
f->uid = INVALID_UID;
f->gid = INVALID_GID;
+ /* Support legacy tests for a valid loginuid */
+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
+ f->type = AUDIT_LOGINUID_SET;
+ f->val = 0;
+ }
+
err = -EINVAL;
if (f->op == Audit_bad)
goto exit_free;
@@ -352,6 +358,12 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
switch(f->type) {
default:
goto exit_free;
+ case AUDIT_LOGINUID_SET:
+ if ((f->val != 0) && (f->val != 1))
+ goto exit_free;
+ if (f->op != Audit_not_equal && f->op != Audit_equal)
+ goto exit_free;
+ break;
case AUDIT_UID:
case AUDIT_EUID:
case AUDIT_SUID:
@@ -459,7 +471,20 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
f->gid = INVALID_GID;
f->lsm_str = NULL;
f->lsm_rule = NULL;
- switch(f->type) {
+
+ /* Support legacy tests for a valid loginuid */
+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
+ f->type = AUDIT_LOGINUID_SET;
+ f->val = 0;
+ }
+
+ switch (f->type) {
+ case AUDIT_LOGINUID_SET:
+ if ((f->val != 0) && (f->val != 1))
+ goto exit_free;
+ if (f->op != Audit_not_equal && f->op != Audit_equal)
+ goto exit_free;
+ break;
case AUDIT_UID:
case AUDIT_EUID:
case AUDIT_SUID:
@@ -1378,6 +1403,10 @@ static int audit_filter_user_rules(struct audit_krule *rule,
result = audit_uid_comparator(audit_get_loginuid(current),
f->op, f->uid);
break;
+ case AUDIT_LOGINUID_SET:
+ result = audit_comparator(audit_loginuid_set(current),
+ f->op, f->val);
+ break;
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
case AUDIT_SUBJ_TYPE:
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index a371f857a0a..c4b72b0e07c 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -742,6 +742,9 @@ static int audit_filter_rules(struct task_struct *tsk,
if (ctx)
result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
break;
+ case AUDIT_LOGINUID_SET:
+ result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
+ break;
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
case AUDIT_SUBJ_TYPE:
@@ -2309,7 +2312,7 @@ int audit_set_loginuid(kuid_t loginuid)
unsigned int sessionid;
#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
- if (uid_valid(task->loginuid))
+ if (audit_loginuid_set(task))
return -EPERM;
#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
if (!capable(CAP_AUDIT_CONTROL))
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a7532c15f6e..e1d826a2cd8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -162,6 +162,9 @@ struct cfent {
struct list_head node;
struct dentry *dentry;
struct cftype *type;
+
+ /* file xattrs */
+ struct simple_xattrs xattrs;
};
/*
@@ -910,13 +913,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
} else {
struct cfent *cfe = __d_cfe(dentry);
struct cgroup *cgrp = dentry->d_parent->d_fsdata;
- struct cftype *cft = cfe->type;
WARN_ONCE(!list_empty(&cfe->node) &&
cgrp != &cgrp->root->top_cgroup,
"cfe still linked for %s\n", cfe->type->name);
+ simple_xattrs_free(&cfe->xattrs);
kfree(cfe);
- simple_xattrs_free(&cft->xattrs);
}
iput(inode);
}
@@ -2065,7 +2067,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
if (!group)
return -ENOMEM;
/* pre-allocate to guarantee space while iterating in rcu read-side. */
- retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
+ retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
if (retval)
goto out_free_group_list;
@@ -2578,7 +2580,7 @@ static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
if (S_ISDIR(dentry->d_inode->i_mode))
return &__d_cgrp(dentry)->xattrs;
else
- return &__d_cft(dentry)->xattrs;
+ return &__d_cfe(dentry)->xattrs;
}
static inline int xattr_enabled(struct dentry *dentry)
@@ -2754,8 +2756,6 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
umode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
- simple_xattrs_init(&cft->xattrs);
-
if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
strcpy(name, subsys->name);
strcat(name, ".");
@@ -2774,12 +2774,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
goto out;
}
+ cfe->type = (void *)cft;
+ cfe->dentry = dentry;
+ dentry->d_fsdata = cfe;
+ simple_xattrs_init(&cfe->xattrs);
+
mode = cgroup_file_mode(cft);
error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
if (!error) {
- cfe->type = (void *)cft;
- cfe->dentry = dentry;
- dentry->d_fsdata = cfe;
list_add_tail(&cfe->node, &parent->files);
cfe = NULL;
}
@@ -3025,11 +3027,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
WARN_ON_ONCE(!rcu_read_lock_held());
/* if first iteration, pretend we just visited @cgroup */
- if (!pos) {
- if (list_empty(&cgroup->children))
- return NULL;
+ if (!pos)
pos = cgroup;
- }
/* visit the first child if exists */
next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
@@ -3037,14 +3036,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
return next;
/* no child, visit my or the closest ancestor's next sibling */
- do {
+ while (pos != cgroup) {
next = list_entry_rcu(pos->sibling.next, struct cgroup,
sibling);
if (&next->sibling != &pos->parent->children)
return next;
pos = pos->parent;
- } while (pos != cgroup);
+ }
return NULL;
}
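The cgroup_next_descendant_pre() rewrite replaces a do/while that could step past the subtree root with a while loop that tests pos != cgroup before following sibling links. A userspace model of the fixed pre-order walk (toy tree with parent/first_child/next_sibling pointers standing in for the RCU list iteration):

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *first_child, *next_sibling;
};

static struct node *next_pre(struct node *pos, struct node *root)
{
	/* first iteration: pretend we just visited the subtree root */
	if (!pos)
		pos = root;

	/* visit the first child if one exists */
	if (pos->first_child)
		return pos->first_child;

	/* no child: visit my or the closest ancestor's next sibling,
	 * never walking past the subtree root (this is the fix) */
	while (pos != root) {
		if (pos->next_sibling)
			return pos->next_sibling;
		pos = pos->parent;
	}
	return NULL;
}

int main(void)
{
	struct node root = { "root" }, a = { "a" }, b = { "b" }, a1 = { "a1" };

	root.first_child = &a;
	a.parent = &root; a.next_sibling = &b; a.first_child = &a1;
	a1.parent = &a;
	b.parent = &root;

	/* prints a, a1, b: the descendants of root in pre-order */
	for (struct node *p = next_pre(NULL, &root); p; p = next_pre(p, &root))
		printf("%s\n", p->name);
	return 0;
}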
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 14be27feda4..7ef55560c72 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -276,6 +276,10 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
} else {
unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+ /* Make sure nsec fits into long */
+ if (unlikely(nsec > KTIME_SEC_MAX))
+ return (ktime_t){ .tv64 = KTIME_MAX };
+
tmp = ktime_set((long)nsec, rem);
}
@@ -1310,6 +1314,8 @@ retry:
expires = ktime_sub(hrtimer_get_expires(timer),
base->offset);
+ if (expires.tv64 < 0)
+ expires.tv64 = KTIME_MAX;
if (expires.tv64 < expires_next.tv64)
expires_next = expires;
break;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cf..473b2b6eccb 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -23,10 +23,27 @@
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ cpulist_parse(str, irq_default_affinity);
+ /*
+ * Set at least the boot cpu. We don't want to end up with
+ * bug reports caused by random command line masks
+ */
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+ return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
static void __init init_irq_default_affinity(void)
{
- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
- cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ if (!irq_default_affinity)
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+ if (cpumask_empty(irq_default_affinity))
+ cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
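
The irqaffinity= handler deliberately forces the boot CPU back into whatever mask was parsed, so a bogus mask cannot leave interrupts with no target CPU at all. Reduced to a sketch (a single unsigned long stands in for the cpumask, and the boot CPU is assumed to be bit 0):

#include <stdio.h>

/* Whatever mask the command line requested, keep the boot CPU in it. */
static unsigned long sanitize_affinity(unsigned long parsed_mask)
{
	return parsed_mask | 1UL;	/* cpumask_set_cpu(boot_cpu, ...) */
}

int main(void)
{
	printf("0x0 -> 0x%lx\n", sanitize_affinity(0x0));
	printf("0xc -> 0x%lx\n", sanitize_affinity(0xc));
	return 0;
}
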
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 56dd34976d7..8985c874a2a 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -570,6 +570,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
int retval = 0;
helper_lock();
+ if (!sub_info->path) {
+ retval = -EINVAL;
+ goto out;
+ }
+
if (sub_info->path[0] == '\0')
goto out;
diff --git a/kernel/module.c b/kernel/module.c
index 0925c9a7197..97f202c9a01 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1861,12 +1861,12 @@ static void free_module(struct module *mod)
{
trace_module_free(mod);
- /* Delete from various lists */
- mutex_lock(&module_mutex);
- stop_machine(__unlink_module, mod, NULL);
- mutex_unlock(&module_mutex);
mod_sysfs_teardown(mod);
+ /* We leave it in the list to prevent duplicate loads, but make sure
+ * that no one uses it while it's being deconstructed. */
+ mod->state = MODULE_STATE_UNFORMED;
+
/* Remove dynamic debug info */
ddebug_remove_module(mod->name);
@@ -1879,6 +1879,11 @@ static void free_module(struct module *mod)
/* Free any allocated parameters. */
destroy_params(mod->kp, mod->num_kp);
+ /* Now we can delete it from the lists */
+ mutex_lock(&module_mutex);
+ stop_machine(__unlink_module, mod, NULL);
+ mutex_unlock(&module_mutex);
+
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
module_free(mod, mod->module_init);
diff --git a/kernel/range.c b/kernel/range.c
index 9b8ae2d6ed6..98883ed0588 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -48,9 +48,11 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
final_start = min(range[i].start, start);
final_end = max(range[i].end, end);
- range[i].start = final_start;
- range[i].end = final_end;
- return nr_range;
+ /* clear it and add it back for further merge */
+ range[i].start = 0;
+ range[i].end = 0;
+ return add_range_with_merge(range, az, nr_range,
+ final_start, final_end);
}
/* Need to add it: */
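
The add_range_with_merge() change fixes transitivity: widening an existing entry in place could leave it overlapping a later entry it was never compared against. Clearing the slot and re-adding the widened range re-runs the merge until nothing overlaps. A simplified standalone sketch of the idea (adjacent-range handling omitted):

#include <stdio.h>

struct range { unsigned long long start, end; };

/* Simplified version of the recursive merge above; cleared slots are
 * left as holes, as in the kernel (callers compact them later). */
static int add_range_with_merge(struct range *r, int az, int nr,
				unsigned long long start,
				unsigned long long end)
{
	int i;

	if (start >= end)
		return nr;

	for (i = 0; i < nr; i++) {
		unsigned long long fs, fe;

		if (!r[i].end)
			continue;		/* cleared slot */
		if (start > r[i].end || r[i].start > end)
			continue;		/* no overlap */

		fs = r[i].start < start ? r[i].start : start;
		fe = r[i].end > end ? r[i].end : end;

		/* clear it and re-add, so the widened range can merge
		 * with entries it only now overlaps */
		r[i].start = r[i].end = 0;
		return add_range_with_merge(r, az, nr, fs, fe);
	}

	if (nr < az) {				/* need to add it */
		r[nr].start = start;
		r[nr].end = end;
		nr++;
	}
	return nr;
}

int main(void)
{
	struct range r[4] = { {10, 20}, {30, 40} };
	int nr = add_range_with_merge(r, 4, 2, 18, 32);

	/* 18..32 bridges both entries: one pass widens to 10..32, the
	 * re-add then swallows 30..40, giving a single 10..40 range */
	printf("nr=%d merged=%llu-%llu\n", nr, r[2].start, r[2].end);
	return 0;
}
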
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 0d095dcaa67..93f8e8fbfbc 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -97,7 +97,7 @@ static const struct file_operations rcubarrier_fops = {
.open = rcubarrier_open,
.read = seq_read,
.llseek = no_llseek,
- .release = seq_release,
+ .release = single_release,
};
#ifdef CONFIG_RCU_BOOST
@@ -208,7 +208,7 @@ static const struct file_operations rcuexp_fops = {
.open = rcuexp_open,
.read = seq_read,
.llseek = no_llseek,
- .release = seq_release,
+ .release = single_release,
};
#ifdef CONFIG_RCU_BOOST
@@ -308,7 +308,7 @@ static const struct file_operations rcuhier_fops = {
.open = rcuhier_open,
.read = seq_read,
.llseek = no_llseek,
- .release = seq_release,
+ .release = single_release,
};
static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
@@ -350,7 +350,7 @@ static const struct file_operations rcugp_fops = {
.open = rcugp_open,
.read = seq_read,
.llseek = no_llseek,
- .release = seq_release,
+ .release = single_release,
};
static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
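
These four hunks fix the same seq_file pairing bug: an open handler built on single_open() must be released with single_release(), because single_open() kmallocs a private struct seq_operations that plain seq_release() never frees. The usual shape of such a pairing, sketched with illustrative names (my_show/my_open/my_fops are not from this patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, NULL);	/* allocates seq ops */
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,	/* frees what single_open allocated */
};
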
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 270fba88792..8edbc65959b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1571,6 +1571,10 @@ static void __sched_fork(struct task_struct *p)
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
p->se.avg.runnable_avg_period = 0;
p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+#endif
#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index e93cca92f38..6af50adcc0f 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -521,18 +521,49 @@ EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
+/*
+ * Perform (stime * rtime) / total, but avoid multiplication overflow by
+ * losing precision when the numbers are big.
+ */
+static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
- u64 temp = (__force u64) rtime;
+ u64 scaled;
- temp *= (__force u64) stime;
+ for (;;) {
+ /* Make sure "rtime" is the bigger of stime/rtime */
+ if (stime > rtime) {
+ u64 tmp = rtime; rtime = stime; stime = tmp;
+ }
- if (sizeof(cputime_t) == 4)
- temp = div_u64(temp, (__force u32) total);
- else
- temp = div64_u64(temp, (__force u64) total);
+ /* Make sure 'total' fits in 32 bits */
+ if (total >> 32)
+ goto drop_precision;
+
+ /* Does rtime (and thus stime) fit in 32 bits? */
+ if (!(rtime >> 32))
+ break;
- return (__force cputime_t) temp;
+ /* Can we just balance rtime/stime rather than dropping bits? */
+ if (stime >> 31)
+ goto drop_precision;
+
+ /* We can grow stime and shrink rtime and try to make them both fit */
+ stime <<= 1;
+ rtime >>= 1;
+ continue;
+
+drop_precision:
+ /* We drop from rtime, it has more bits than stime */
+ rtime >>= 1;
+ total >>= 1;
+ }
+
+ /*
+ * Make sure gcc understands that this is a 32x32->64 multiply,
+ * followed by a 64/32->64 divide.
+ */
+ scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
+ return (__force cputime_t) scaled;
}
/*
@@ -543,7 +574,7 @@ static void cputime_adjust(struct task_cputime *curr,
struct cputime *prev,
cputime_t *ut, cputime_t *st)
{
- cputime_t rtime, stime, total;
+ cputime_t rtime, stime, utime, total;
stime = curr->stime;
total = stime + curr->utime;
@@ -560,10 +591,22 @@ static void cputime_adjust(struct task_cputime *curr,
*/
rtime = nsecs_to_cputime(curr->sum_exec_runtime);
- if (total)
- stime = scale_stime(stime, rtime, total);
- else
+ /*
+ * Update userspace visible utime/stime values only if actual execution
+ * time is bigger than the already exported one. Note that it can happen
+ * that we provided bigger values due to scaling inaccuracy on big numbers.
+ */
+ if (prev->stime + prev->utime >= rtime)
+ goto out;
+
+ if (total) {
+ stime = scale_stime((__force u64)stime,
+ (__force u64)rtime, (__force u64)total);
+ utime = rtime - stime;
+ } else {
stime = rtime;
+ utime = 0;
+ }
/*
* If the tick based count grows faster than the scheduler one,
@@ -571,8 +614,9 @@ static void cputime_adjust(struct task_cputime *curr,
* Let's enforce monotonicity.
*/
prev->stime = max(prev->stime, stime);
- prev->utime = max(prev->utime, rtime - prev->stime);
+ prev->utime = max(prev->utime, utime);
+out:
*ut = prev->utime;
*st = prev->stime;
}
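
scale_stime() now computes stime * rtime / total with only 64-bit arithmetic, shedding a low-order bit from the operands only when the product would not fit. A userspace harness comparing the loop against a 128-bit reference (assumes a compiler providing __int128 and a nonzero total):

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the precision-shedding loop above. */
static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		/* keep rtime the bigger of the two */
		if (stime > rtime) {
			uint64_t tmp = rtime; rtime = stime; stime = tmp;
		}

		if (total >> 32)		/* total must fit in 32 bits */
			goto drop_precision;

		if (!(rtime >> 32))		/* both operands fit: done */
			break;

		if (stime >> 31)		/* cannot rebalance any more */
			goto drop_precision;

		stime <<= 1;			/* grow stime, shrink rtime, */
		rtime >>= 1;			/* keeping the product roughly */
		continue;			/* constant */

drop_precision:
		rtime >>= 1;			/* rtime has more bits to spare */
		total >>= 1;
	}

	/* 32x32->64 multiply followed by a 64/32->64 divide */
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	uint64_t s = 1ULL << 40, r = (1ULL << 44) + 12345, t = 3ULL << 40;
	unsigned __int128 exact = (unsigned __int128)s * r / t;

	printf("scaled=%llu exact=%llu\n",
	       (unsigned long long)scale_stime(s, r, t),
	       (unsigned long long)exact);
	return 0;
}
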
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 75024a67352..fbd8caa83ef 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -94,6 +94,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
#ifdef CONFIG_SMP
P(se->avg.runnable_avg_sum);
P(se->avg.runnable_avg_period);
+ P(se->avg.usage_avg_sum);
P(se->avg.load_avg_contrib);
P(se->avg.decay_count);
#endif
@@ -223,6 +224,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->tg_runnable_contrib);
SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg",
atomic_read(&cfs_rq->tg->runnable_avg));
+ SEQ_printf(m, " .%-30s: %d\n", "tg->usage_avg",
+ atomic_read(&cfs_rq->tg->usage_avg));
#endif
print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a33e5986fc..5ecfb1be889 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,9 +31,20 @@
#include <linux/task_work.h>
#include <trace/events/sched.h>
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* Include cpufreq header to add a notifier so that the load-scaling
+ * code can track the current CPU frequency
+ */
+#include <linux/cpufreq.h>
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
#include "sched.h"
+
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -1200,8 +1211,95 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
-/*
- * We can represent the historical contribution to runnable average as the
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+
+#define HMP_VARIABLE_SCALE_SHIFT 16ULL
+struct hmp_global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+ int *value;
+ int (*to_sysfs)(int);
+ int (*from_sysfs)(int);
+};
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+#define HMP_DATA_SYSFS_MAX 4
+#else
+#define HMP_DATA_SYSFS_MAX 3
+#endif
+
+struct hmp_data_struct {
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ int freqinvar_load_scale_enabled;
+#endif
+ int multiplier; /* used to scale the time delta */
+ struct attribute_group attr_group;
+ struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1];
+ struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX];
+} hmp_data;
+
+static u64 hmp_variable_scale_convert(u64 delta);
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* Frequency-Invariant Load Modification:
+ * Loads are calculated as in PJT's patch however we also scale the current
+ * contribution in line with the frequency of the CPU that the task was
+ * executed on.
+ * In this version, we use a simple linear scale derived from the maximum
+ * frequency reported by CPUFreq. As an example:
+ *
+ * Consider that we ran a task for 100% of the previous interval.
+ *
+ * Our CPU was under asynchronous frequency control through one of the
+ * CPUFreq governors.
+ *
+ * The CPUFreq governor reports that it is able to scale the CPU between
+ * 500MHz and 1GHz.
+ *
+ * During the period, the CPU was running at 1GHz.
+ *
+ * In this case, our load contribution for that period is calculated as
+ * 1 * (number_of_active_microseconds)
+ *
+ * This results in our task being able to accumulate maximum load as normal.
+ *
+ *
+ * Consider now that our CPU was executing at 500MHz.
+ *
+ * We now scale the load contribution such that it is calculated as
+ * 0.5 * (number_of_active_microseconds)
+ *
+ * Our task can only record 50% maximum load during this period.
+ *
+ * This represents the task consuming 50% of the CPU's *possible* compute
+ * capacity. However the task did consume 100% of the CPU's *available*
+ * compute capacity which is the value seen by the CPUFreq governor and
+ * user-side CPU Utilization tools.
+ *
+ * Restricting tracked load to be scaled by the CPU's frequency accurately
+ * represents the consumption of possible compute capacity and allows the
+ * HMP migration's simple threshold migration strategy to interact more
+ * predictably with CPUFreq's asynchronous compute capacity changes.
+ */
+#define SCHED_FREQSCALE_SHIFT 10
+struct cpufreq_extents {
+ u32 curr_scale;
+ u32 min;
+ u32 max;
+ u32 flags;
+};
+/* Flag set when the governor in use only allows one frequency.
+ * Disables scaling.
+ */
+#define SCHED_LOAD_FREQINVAR_SINGLEFREQ 0x01
+
+static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
+
+/* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
* history into segments of approximately 1ms (1024us); label the segment that
* occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
@@ -1230,13 +1328,24 @@ static u32 __compute_runnable_contrib(u64 n)
*/
static __always_inline int __update_entity_runnable_avg(u64 now,
struct sched_avg *sa,
- int runnable)
+ int runnable,
+ int running,
+ int cpu)
{
u64 delta, periods;
u32 runnable_contrib;
int delta_w, decayed = 0;
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ u64 scaled_delta;
+ u32 scaled_runnable_contrib;
+ int scaled_delta_w;
+ u32 curr_scale = 1024;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
delta = now - sa->last_runnable_update;
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+ delta = hmp_variable_scale_convert(delta);
+#endif
/*
* This should only happen when time goes backwards, which it
* unfortunately does during sched clock init when we swap over to TSC.
@@ -1255,6 +1364,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
return 0;
sa->last_runnable_update = now;
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ /* retrieve scale factor for load */
+ if (hmp_data.freqinvar_load_scale_enabled)
+ curr_scale = freq_scale[cpu].curr_scale;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+
/* delta_w is the amount already accumulated against our next period */
delta_w = sa->runnable_avg_period % 1024;
if (delta + delta_w >= 1024) {
@@ -1267,8 +1382,20 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
* period and accrue it.
*/
delta_w = 1024 - delta_w;
+ /* scale runnable time if necessary */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_delta_w = (delta_w * curr_scale)
+ >> SCHED_FREQSCALE_SHIFT;
+ if (runnable)
+ sa->runnable_avg_sum += scaled_delta_w;
+ if (running)
+ sa->usage_avg_sum += scaled_delta_w;
+#else
if (runnable)
sa->runnable_avg_sum += delta_w;
+ if (running)
+ sa->usage_avg_sum += delta_w;
+#endif /* #ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += delta_w;
delta -= delta_w;
@@ -1276,22 +1403,49 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
/* Figure out how many additional periods this update spans */
periods = delta / 1024;
delta %= 1024;
-
+ /* decay the load we have accumulated so far */
sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
periods + 1);
sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
periods + 1);
-
+ sa->usage_avg_sum = decay_load(sa->usage_avg_sum, periods + 1);
+ /* add the contribution from this period */
/* Efficiently calculate \sum (1..n_period) 1024*y^i */
runnable_contrib = __compute_runnable_contrib(periods);
+ /* Apply load scaling if necessary.
+ * Note that multiplying the whole series is the same as
+ * multiplying each term
+ */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_runnable_contrib = (runnable_contrib * curr_scale)
+ >> SCHED_FREQSCALE_SHIFT;
+ if (runnable)
+ sa->runnable_avg_sum += scaled_runnable_contrib;
+ if (running)
+ sa->usage_avg_sum += scaled_runnable_contrib;
+#else
if (runnable)
sa->runnable_avg_sum += runnable_contrib;
+ if (running)
+ sa->usage_avg_sum += runnable_contrib;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += runnable_contrib;
}
/* Remainder of delta accrued against u_0` */
+ /* scale if necessary */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ scaled_delta = ((delta * curr_scale) >> SCHED_FREQSCALE_SHIFT);
+ if (runnable)
+ sa->runnable_avg_sum += scaled_delta;
+ if (running)
+ sa->usage_avg_sum += scaled_delta;
+#else
if (runnable)
sa->runnable_avg_sum += delta;
+ if (running)
+ sa->usage_avg_sum += delta;
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
sa->runnable_avg_period += delta;
return decayed;
@@ -1337,16 +1491,28 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
struct cfs_rq *cfs_rq)
{
struct task_group *tg = cfs_rq->tg;
- long contrib;
+ long contrib, usage_contrib;
/* The fraction of a cpu used by this cfs_rq */
contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
sa->runnable_avg_period + 1);
contrib -= cfs_rq->tg_runnable_contrib;
- if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
+ usage_contrib = div_u64(sa->usage_avg_sum << NICE_0_SHIFT,
+ sa->runnable_avg_period + 1);
+ usage_contrib -= cfs_rq->tg_usage_contrib;
+
+ /*
+ * contrib/usage at this point represent deltas, only update if they
+ * are substantive.
+ */
+ if ((abs(contrib) > cfs_rq->tg_runnable_contrib / 64) ||
+ (abs(usage_contrib) > cfs_rq->tg_usage_contrib / 64)) {
atomic_add(contrib, &tg->runnable_avg);
cfs_rq->tg_runnable_contrib += contrib;
+
+ atomic_add(usage_contrib, &tg->usage_avg);
+ cfs_rq->tg_usage_contrib += usage_contrib;
}
}
@@ -1407,6 +1573,11 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
contrib /= (se->avg.runnable_avg_period + 1);
se->avg.load_avg_contrib = scale_load(contrib);
+ trace_sched_task_load_contrib(task_of(se), se->avg.load_avg_contrib);
+ contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
+ contrib /= (se->avg.runnable_avg_period + 1);
+ se->avg.load_avg_ratio = scale_load(contrib);
+ trace_sched_task_runnable_ratio(task_of(se), se->avg.load_avg_ratio);
}
/* Compute the current contribution to load_avg by se, return any delta */
@@ -1442,7 +1613,11 @@ static inline void update_entity_load_avg(struct sched_entity *se,
struct cfs_rq *cfs_rq = cfs_rq_of(se);
long contrib_delta;
u64 now;
+ int cpu = -1; /* not used in normal case */
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ cpu = cfs_rq->rq->cpu;
+#endif
/*
* For a group entity we need to use their owned cfs_rq_clock_task() in
* case they are the parent of a throttled hierarchy.
@@ -1452,7 +1627,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
else
now = cfs_rq_clock_task(group_cfs_rq(se));
- if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
+ if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
+ cfs_rq->curr == se, cpu))
return;
contrib_delta = __update_entity_load_avg_contrib(se);
@@ -1496,8 +1672,19 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
{
- __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+ u32 contrib;
+ int cpu = -1; /* not used in normal case */
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ cpu = rq->cpu;
+#endif
+ __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable,
+ runnable, cpu);
__update_tg_runnable_avg(&rq->avg, &rq->cfs);
+ contrib = rq->avg.runnable_avg_sum * scale_load_down(1024);
+ contrib /= (rq->avg.runnable_avg_period + 1);
+ trace_sched_rq_runnable_ratio(cpu_of(rq), scale_load(contrib));
+ trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
}
/* Add the load generated by se into cfs_rq's child load-average */
@@ -1864,6 +2051,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
+ update_entity_load_avg(se, 1);
}
update_stats_curr_start(cfs_rq, se);
@@ -3292,6 +3480,387 @@ done:
return target;
}
+#ifdef CONFIG_SCHED_HMP
+/*
+ * Heterogenous multiprocessor (HMP) optimizations
+ *
+ * The cpu types are distinguished using a list of hmp_domains
+ * which each represent one cpu type using a cpumask.
+ * The list is assumed ordered by compute capacity with the
+ * fastest domain first.
+ */
+DEFINE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+
+extern void __init arch_get_hmp_domains(struct list_head *hmp_domains_list);
+
+/* Setup hmp_domains */
+static int __init hmp_cpu_mask_setup(void)
+{
+ char buf[64];
+ struct hmp_domain *domain;
+ struct list_head *pos;
+ int dc, cpu;
+
+ pr_debug("Initializing HMP scheduler:\n");
+
+ /* Initialize hmp_domains using platform code */
+ arch_get_hmp_domains(&hmp_domains);
+ if (list_empty(&hmp_domains)) {
+ pr_debug("HMP domain list is empty!\n");
+ return 0;
+ }
+
+ /* Print hmp_domains */
+ dc = 0;
+ list_for_each(pos, &hmp_domains) {
+ domain = list_entry(pos, struct hmp_domain, hmp_domains);
+ cpulist_scnprintf(buf, 64, &domain->cpus);
+ pr_debug(" HMP domain %d: %s\n", dc, buf);
+
+ for_each_cpu_mask(cpu, domain->cpus) {
+ per_cpu(hmp_cpu_domain, cpu) = domain;
+ }
+ dc++;
+ }
+
+ return 1;
+}
+
+/*
+ * Migration thresholds should be in the range [0..1023]
+ * hmp_up_threshold: min. load required for migrating tasks to a faster cpu
+ * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
+ * The default values (512, 256) offer good responsiveness, but may need
+ * tweaking to suit particular needs.
+ *
+ * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
+ * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
+ * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
+ */
+unsigned int hmp_up_threshold = 512;
+unsigned int hmp_down_threshold = 256;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
+#endif
+unsigned int hmp_next_up_threshold = 4096;
+unsigned int hmp_next_down_threshold = 4096;
+
+static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
+static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
+
+/* Check if cpu is in fastest hmp_domain */
+static inline unsigned int hmp_cpu_is_fastest(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return pos == hmp_domains.next;
+}
+
+/* Check if cpu is in slowest hmp_domain */
+static inline unsigned int hmp_cpu_is_slowest(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_is_last(pos, &hmp_domains);
+}
+
+/* Next (slower) hmp_domain relative to cpu */
+static inline struct hmp_domain *hmp_slower_domain(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_entry(pos->next, struct hmp_domain, hmp_domains);
+}
+
+/* Previous (faster) hmp_domain relative to cpu */
+static inline struct hmp_domain *hmp_faster_domain(int cpu)
+{
+ struct list_head *pos;
+
+ pos = &hmp_cpu_domain(cpu)->hmp_domains;
+ return list_entry(pos->prev, struct hmp_domain, hmp_domains);
+}
+
+/*
+ * Selects a cpu in previous (faster) hmp_domain
+ * Note that cpumask_any_and() returns the first cpu in the cpumask
+ */
+static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
+ int cpu)
+{
+ return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
+ tsk_cpus_allowed(tsk));
+}
+
+/*
+ * Selects a cpu in next (slower) hmp_domain
+ * Note that cpumask_any_and() returns the first cpu in the cpumask
+ */
+static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
+ int cpu)
+{
+ return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(tsk));
+}
+
+static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+ se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_down_migration = 0;
+}
+
+static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+ se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_up_migration = 0;
+}
+
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+/*
+ * Heterogenous multiprocessor (HMP) optimizations
+ *
+ * These functions make it possible to change the growing speed of the
+ * load_avg_ratio; by default it goes from 0 to 0.5 in LOAD_AVG_PERIOD = 32ms.
+ * This can now be changed with /sys/kernel/hmp/load_avg_period_ms.
+ *
+ * These functions also make it possible to change the up and down thresholds of HMP
+ * using /sys/kernel/hmp/{up,down}_threshold.
+ * Both must be between 0 and 1023. The threshold that is compared
+ * to the load_avg_ratio is up_threshold/1024 and down_threshold/1024.
+ *
+ * For instance, if load_avg_period_ms = 64 and up_threshold = 512, an idle
+ * task with a load of 0 will reach the threshold after 64ms of busy looping.
+ *
+ * Changing load_avg_period_ms has the same effect as changing the
+ * default scaling factor Y=1002/1024 in the load_avg_ratio computation to
+ * (1002/1024.0)^(LOAD_AVG_PERIOD/load_avg_period_ms), but the latter
+ * could trigger overflows.
+ * For instance, with Y = 1023/1024 in __update_task_entity_contrib()
+ * "contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);"
+ * could overflow for a weight > 2^12 even though the load_avg_contrib
+ * should still be a 32-bit result. This does not happen when multiplying
+ * the delta time by 1/22 and setting load_avg_period_ms = 706.
+ */
+
+/*
+ * By scaling the delta time we end up increasing or decreasing the
+ * growing speed of the per-entity load_avg_ratio.
+ * The scale factor hmp_data.multiplier is a fixed point
+ * number: (32-HMP_VARIABLE_SCALE_SHIFT).HMP_VARIABLE_SCALE_SHIFT
+ */
+static u64 hmp_variable_scale_convert(u64 delta)
+{
+ u64 high = delta >> 32ULL;
+ u64 low = delta & 0xffffffffULL;
+ low *= hmp_data.multiplier;
+ high *= hmp_data.multiplier;
+ return (low >> HMP_VARIABLE_SCALE_SHIFT)
+ + (high << (32ULL - HMP_VARIABLE_SCALE_SHIFT));
+}
+
+static ssize_t hmp_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ struct hmp_global_attr *hmp_attr =
+ container_of(attr, struct hmp_global_attr, attr);
+ int temp = *(hmp_attr->value);
+ if (hmp_attr->to_sysfs != NULL)
+ temp = hmp_attr->to_sysfs(temp);
+ ret = sprintf(buf, "%d\n", temp);
+ return ret;
+}
+
+static ssize_t hmp_store(struct kobject *a, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int temp;
+ ssize_t ret = count;
+ struct hmp_global_attr *hmp_attr =
+ container_of(attr, struct hmp_global_attr, attr);
+ char *str = vmalloc(count + 1);
+ if (str == NULL)
+ return -ENOMEM;
+ memcpy(str, buf, count);
+ str[count] = 0;
+ if (sscanf(str, "%d", &temp) < 1)
+ ret = -EINVAL;
+ else {
+ if (hmp_attr->from_sysfs != NULL)
+ temp = hmp_attr->from_sysfs(temp);
+ if (temp < 0)
+ ret = -EINVAL;
+ else
+ *(hmp_attr->value) = temp;
+ }
+ vfree(str);
+ return ret;
+}
+
+static int hmp_period_tofrom_sysfs(int value)
+{
+ if (value <= 0)
+ return -1; /* rejected as -EINVAL by hmp_store() */
+ return (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / value;
+}
+
+/* max value for threshold is 1024 */
+static int hmp_theshold_from_sysfs(int value)
+{
+ if (value > 1024)
+ return -1;
+ return value;
+}
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+/* freqinvar control is only 0,1 off/on */
+static int hmp_freqinvar_from_sysfs(int value)
+{
+ if (value < 0 || value > 1)
+ return -1;
+ return value;
+}
+#endif
+static void hmp_attr_add(
+ const char *name,
+ int *value,
+ int (*to_sysfs)(int),
+ int (*from_sysfs)(int))
+{
+ int i = 0;
+ while (hmp_data.attributes[i] != NULL) {
+ i++;
+ if (i >= HMP_DATA_SYSFS_MAX)
+ return;
+ }
+ hmp_data.attr[i].attr.mode = 0644;
+ hmp_data.attr[i].show = hmp_show;
+ hmp_data.attr[i].store = hmp_store;
+ hmp_data.attr[i].attr.name = name;
+ hmp_data.attr[i].value = value;
+ hmp_data.attr[i].to_sysfs = to_sysfs;
+ hmp_data.attr[i].from_sysfs = from_sysfs;
+ hmp_data.attributes[i] = &hmp_data.attr[i].attr;
+ hmp_data.attributes[i + 1] = NULL;
+}
+
+static int hmp_attr_init(void)
+{
+ int ret;
+ memset(&hmp_data, 0, sizeof(hmp_data));
+ /* by default load_avg_period_ms == LOAD_AVG_PERIOD
+ * meaning no change
+ */
+ hmp_data.multiplier = hmp_period_tofrom_sysfs(LOAD_AVG_PERIOD);
+
+ hmp_attr_add("load_avg_period_ms",
+ &hmp_data.multiplier,
+ hmp_period_tofrom_sysfs,
+ hmp_period_tofrom_sysfs);
+ hmp_attr_add("up_threshold",
+ &hmp_up_threshold,
+ NULL,
+ hmp_theshold_from_sysfs);
+ hmp_attr_add("down_threshold",
+ &hmp_down_threshold,
+ NULL,
+ hmp_theshold_from_sysfs);
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+ /* default frequency-invariant scaling ON */
+ hmp_data.freqinvar_load_scale_enabled = 1;
+ hmp_attr_add("frequency_invariant_load_scale",
+ &hmp_data.freqinvar_load_scale_enabled,
+ NULL,
+ hmp_freqinvar_from_sysfs);
+#endif
+ hmp_data.attr_group.name = "hmp";
+ hmp_data.attr_group.attrs = hmp_data.attributes;
+ ret = sysfs_create_group(kernel_kobj,
+ &hmp_data.attr_group);
+ return ret;
+}
+late_initcall(hmp_attr_init);
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
+
+static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
+ int *min_cpu)
+{
+ int cpu;
+ int min_load = INT_MAX;
+ int min_cpu_temp = NR_CPUS;
+
+ for_each_cpu_mask(cpu, hmpd->cpus) {
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
+ min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
+ min_cpu_temp = cpu;
+ }
+ }
+
+ if (min_cpu)
+ *min_cpu = min_cpu_temp;
+
+ return min_load;
+}
+
+/*
+ * Calculate the task starvation
+ * This is the ratio of actually running time vs. runnable time.
+ * If the two are equal the task is getting the cpu time it needs or
+ * it is alone on the cpu and the cpu is fully utilized.
+ */
+static inline unsigned int hmp_task_starvation(struct sched_entity *se)
+{
+ u32 starvation;
+
+ starvation = se->avg.usage_avg_sum * scale_load_down(NICE_0_LOAD);
+ starvation /= (se->avg.runnable_avg_sum + 1);
+
+ return scale_load(starvation);
+}
+
+static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
+{
+ int min_usage;
+ int dest_cpu = NR_CPUS;
+
+ if (hmp_cpu_is_slowest(cpu))
+ return NR_CPUS;
+
+ /* Is the current domain fully loaded? */
+ /* load < ~94% */
+ min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
+ if (min_usage < NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the cpu oversubscribed? */
+ /* load < ~194% */
+ if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+ return NR_CPUS;
+
+ /* Is the task alone on the cpu? */
+ if (cpu_rq(cpu)->cfs.nr_running < 2)
+ return NR_CPUS;
+
+ /* Is the task actually starving? */
+ if (hmp_task_starvation(se) > 768) /* <25% waiting */
+ return NR_CPUS;
+
+ /* Does the slower domain have spare cycles? */
+ min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
+ /* load > 50% */
+ if (min_usage > NICE_0_LOAD/2)
+ return NR_CPUS;
+
+ if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
+ return dest_cpu;
+ return NR_CPUS;
+}
+#endif /* CONFIG_SCHED_HMP */
+
/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
@@ -3390,6 +3959,24 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
unlock:
rcu_read_unlock();
+#ifdef CONFIG_SCHED_HMP
+ if (hmp_up_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ hmp_next_up_delay(&p->se, new_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ if (hmp_down_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+ hmp_next_down_delay(&p->se, new_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ /* Make sure that the task stays in its previous hmp domain */
+ if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
+ return prev_cpu;
+#endif
+
return new_cpu;
}
@@ -3916,7 +4503,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
* 1) task is cache cold, or
* 2) too many balance attempts have failed.
*/
-
tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
if (!tsk_cache_hot ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
@@ -5650,6 +6236,286 @@ need_kick:
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif
+#ifdef CONFIG_SCHED_HMP
+/* Check if task should migrate to a faster cpu */
+static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ u64 now;
+
+ if (hmp_cpu_is_fastest(cpu))
+ return 0;
+
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ /* Filter by task priority */
+ if (p->prio >= hmp_up_prio)
+ return 0;
+#endif
+
+ /* Let the task load settle before doing another up migration */
+ now = cfs_rq_clock_task(cfs_rq);
+ if (((now - se->avg.hmp_last_up_migration) >> 10)
+ < hmp_next_up_threshold)
+ return 0;
+
+ if (se->avg.load_avg_ratio > hmp_up_threshold) {
+ /* Target domain load < ~94% */
+ if (hmp_domain_min_load(hmp_faster_domain(cpu), NULL)
+ > NICE_0_LOAD-64)
+ return 0;
+ if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
+ tsk_cpus_allowed(p)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Check if task should migrate to a slower cpu */
+static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ u64 now;
+
+ if (hmp_cpu_is_slowest(cpu))
+ return 0;
+
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ /* Filter by task priority */
+ if ((p->prio >= hmp_up_prio) &&
+ cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(p))) {
+ return 1;
+ }
+#endif
+
+ /* Let the task load settle before doing another down migration */
+ now = cfs_rq_clock_task(cfs_rq);
+ if (((now - se->avg.hmp_last_down_migration) >> 10)
+ < hmp_next_down_threshold)
+ return 0;
+
+ if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
+ tsk_cpus_allowed(p))
+ && se->avg.load_avg_ratio < hmp_down_threshold) {
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * hmp_can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ * Ideally this function should be merged with can_migrate_task() to avoid
+ * redundant code.
+ */
+static int hmp_can_migrate_task(struct task_struct *p, struct lb_env *env)
+{
+ int tsk_cache_hot = 0;
+
+ /*
+ * We do not migrate tasks that are:
+ * 1) running (obviously), or
+ * 2) cannot be migrated to this CPU due to cpus_allowed
+ */
+ if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+ schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+ return 0;
+ }
+ env->flags &= ~LBF_ALL_PINNED;
+
+ if (task_running(env->src_rq, p)) {
+ schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+ return 0;
+ }
+
+ /*
+ * Aggressive migration if:
+ * 1) task is cache cold, or
+ * 2) too many balance attempts have failed.
+ */
+
+ tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
+ if (!tsk_cache_hot ||
+ env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+ if (tsk_cache_hot) {
+ schedstat_inc(env->sd, lb_hot_gained[env->idle]);
+ schedstat_inc(p, se.statistics.nr_forced_migrations);
+ }
+#endif
+ return 1;
+ }
+
+ return 1;
+}
+
+/*
+ * move_specific_task tries to move a specific task.
+ * Returns 1 if successful and 0 otherwise.
+ * Called with both runqueues locked.
+ */
+static int move_specific_task(struct lb_env *env, struct task_struct *pm)
+{
+ struct task_struct *p, *n;
+
+ list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
+ if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
+ env->dst_cpu))
+ continue;
+
+ if (!hmp_can_migrate_task(p, env))
+ continue;
+ /* Check if we found the right task */
+ if (p != pm)
+ continue;
+
+ move_task(p, env);
+ /*
+ * Right now, this is only the third place move_task()
+ * is called, so we can safely collect move_task()
+ * stats here rather than inside move_task().
+ */
+ schedstat_inc(env->sd, lb_gained[env->idle]);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * hmp_active_task_migration_cpu_stop is run by cpu stopper and used to
+ * migrate a specific task from one runqueue to another.
+ * hmp_force_up_migration uses this to push a currently running task
+ * off a runqueue.
+ * Based on active_load_balance_stop_cpu and can potentially be merged.
+ */
+static int hmp_active_task_migration_cpu_stop(void *data)
+{
+ struct rq *busiest_rq = data;
+ struct task_struct *p = busiest_rq->migrate_task;
+ int busiest_cpu = cpu_of(busiest_rq);
+ int target_cpu = busiest_rq->push_cpu;
+ struct rq *target_rq = cpu_rq(target_cpu);
+ struct sched_domain *sd;
+
+ raw_spin_lock_irq(&busiest_rq->lock);
+ /* make sure the requested cpu hasn't gone down in the meantime */
+ if (unlikely(busiest_cpu != smp_processor_id() ||
+ !busiest_rq->active_balance)) {
+ goto out_unlock;
+ }
+ /* Is there any task to move? */
+ if (busiest_rq->nr_running <= 1)
+ goto out_unlock;
+ /* Task has migrated meanwhile, abort forced migration */
+ if (task_rq(p) != busiest_rq)
+ goto out_unlock;
+ /*
+ * This condition is "impossible"; if it occurs
+ * we need to fix it. Originally reported by
+ * Bjorn Helgaas on a 128-cpu setup.
+ */
+ BUG_ON(busiest_rq == target_rq);
+
+ /* move a task from busiest_rq to target_rq */
+ double_lock_balance(busiest_rq, target_rq);
+
+ /* Search for an sd spanning us and the target CPU. */
+ rcu_read_lock();
+ for_each_domain(target_cpu, sd) {
+ if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
+ break;
+ }
+
+ if (likely(sd)) {
+ struct lb_env env = {
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ };
+
+ schedstat_inc(sd, alb_count);
+
+ if (move_specific_task(&env, p))
+ schedstat_inc(sd, alb_pushed);
+ else
+ schedstat_inc(sd, alb_failed);
+ }
+ rcu_read_unlock();
+ double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+ busiest_rq->active_balance = 0;
+ raw_spin_unlock_irq(&busiest_rq->lock);
+ return 0;
+}
+
+static DEFINE_SPINLOCK(hmp_force_migration);
+
+/*
+ * hmp_force_up_migration checks runqueues for tasks that need to
+ * be actively migrated to a faster cpu.
+ */
+static void hmp_force_up_migration(int this_cpu)
+{
+ int cpu;
+ struct sched_entity *curr;
+ struct rq *target;
+ unsigned long flags;
+ unsigned int force;
+ struct task_struct *p;
+
+ if (!spin_trylock(&hmp_force_migration))
+ return;
+ for_each_online_cpu(cpu) {
+ force = 0;
+ target = cpu_rq(cpu);
+ raw_spin_lock_irqsave(&target->lock, flags);
+ curr = target->cfs.curr;
+ if (!curr || !entity_is_task(curr)) {
+ raw_spin_unlock_irqrestore(&target->lock, flags);
+ continue;
+ }
+ p = task_of(curr);
+ if (hmp_up_migration(cpu, curr)) {
+ if (!target->active_balance) {
+ target->active_balance = 1;
+ target->push_cpu = hmp_select_faster_cpu(p, cpu);
+ target->migrate_task = p;
+ force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 1);
+ hmp_next_up_delay(&p->se, target->push_cpu);
+ }
+ }
+ if (!force && !target->active_balance) {
+ /*
+ * For now we just check the currently running task.
+ * Selecting the lightest task for offloading will
+ * require extensive bookkeeping.
+ */
+ target->push_cpu = hmp_offload_down(cpu, curr);
+ if (target->push_cpu < NR_CPUS) {
+ target->active_balance = 1;
+ target->migrate_task = p;
+ force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 2);
+ hmp_next_down_delay(&p->se, target->push_cpu);
+ }
+ }
+ raw_spin_unlock_irqrestore(&target->lock, flags);
+ if (force)
+ stop_one_cpu_nowait(cpu_of(target),
+ hmp_active_task_migration_cpu_stop,
+ target, &target->active_balance_work);
+ }
+ spin_unlock(&hmp_force_migration);
+}
+#else
+static void hmp_force_up_migration(int this_cpu) { }
+#endif /* CONFIG_SCHED_HMP */
+
/*
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
@@ -5661,6 +6527,8 @@ static void run_rebalance_domains(struct softirq_action *h)
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
+ hmp_force_up_migration(this_cpu);
+
rebalance_domains(this_cpu, idle);
/*
@@ -6160,6 +7028,139 @@ __init void init_sched_fair_class(void)
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
cpu_notifier(sched_ilb_notifier, 0);
#endif
+
+#ifdef CONFIG_SCHED_HMP
+ hmp_cpu_mask_setup();
+#endif
#endif /* SMP */
}
+
+#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
+static u32 cpufreq_calc_scale(u32 min, u32 max, u32 curr)
+{
+ u32 result = curr / max;
+ return result;
+}
+
+/* Called when the CPU Frequency is changed.
+ * Once for each CPU.
+ */
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ int cpu = freq->cpu;
+ struct cpufreq_extents *extents;
+
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return NOTIFY_OK;
+
+ /* if dynamic load scale is disabled, set the load scale to 1.0 */
+ if (!hmp_data.freqinvar_load_scale_enabled) {
+ freq_scale[cpu].curr_scale = 1024;
+ return NOTIFY_OK;
+ }
+
+ extents = &freq_scale[cpu];
+ if (extents->flags & SCHED_LOAD_FREQINVAR_SINGLEFREQ) {
+ /* If our governor was recognised as a single-freq governor,
+ * use 1.0
+ */
+ extents->curr_scale = 1024;
+ } else {
+ extents->curr_scale = cpufreq_calc_scale(extents->min,
+ extents->max, freq->new);
+ }
+
+ return NOTIFY_OK;
+}
+
+/* Called when the CPUFreq governor is changed.
+ * Only called for the CPUs which are actually changed by the
+ * userspace.
+ */
+static int cpufreq_policy_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_extents *extents;
+ int cpu, singleFreq = 0;
+ static const char performance_governor[] = "performance";
+ static const char powersave_governor[] = "powersave";
+
+ if (event == CPUFREQ_START)
+ return 0;
+
+ if (event != CPUFREQ_INCOMPATIBLE)
+ return 0;
+
+ /* CPUFreq governors do not accurately report the range of
+ * CPU Frequencies they will choose from.
+ * We recognise performance and powersave governors as
+ * single-frequency only.
+ */
+ if (!strncmp(policy->governor->name, performance_governor,
+ strlen(performance_governor)) ||
+ !strncmp(policy->governor->name, powersave_governor,
+ strlen(powersave_governor)))
+ singleFreq = 1;
+
+ /* Make sure that all CPUs impacted by this policy are
+ * updated since we will only get a notification when the
+ * user explicitly changes the policy on a CPU.
+ */
+ for_each_cpu(cpu, policy->cpus) {
+ extents = &freq_scale[cpu];
+ extents->max = policy->max >> SCHED_FREQSCALE_SHIFT;
+ extents->min = policy->min >> SCHED_FREQSCALE_SHIFT;
+ if (!hmp_data.freqinvar_load_scale_enabled) {
+ extents->curr_scale = 1024;
+ } else if (singleFreq) {
+ extents->flags |= SCHED_LOAD_FREQINVAR_SINGLEFREQ;
+ extents->curr_scale = 1024;
+ } else {
+ extents->flags &= ~SCHED_LOAD_FREQINVAR_SINGLEFREQ;
+ extents->curr_scale = cpufreq_calc_scale(extents->min,
+ extents->max, policy->cur);
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+static struct notifier_block cpufreq_policy_notifier = {
+ .notifier_call = cpufreq_policy_callback,
+};
+
+static int __init register_sched_cpufreq_notifier(void)
+{
+ int ret = 0;
+
+ /* init safe defaults since there are no policies at registration */
+ for (ret = 0; ret < CONFIG_NR_CPUS; ret++) {
+ /* safe defaults */
+ freq_scale[ret].max = 1024;
+ freq_scale[ret].min = 1024;
+ freq_scale[ret].curr_scale = 1024;
+ }
+
+ pr_info("sched: registering cpufreq notifiers for scale-invariant loads\n");
+ ret = cpufreq_register_notifier(&cpufreq_policy_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (ret != -EINVAL)
+ ret = cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return ret;
+}
+
+core_initcall(register_sched_cpufreq_notifier);
+#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
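
Two scaling steps in this fair.c patch are easy to check in isolation: hmp_variable_scale_convert() multiplies a 64-bit time delta by a 16.16 fixed-point factor using 32-bit halves (no 128-bit type needed), and the frequency-invariant path scales contributions by curr_scale/1024, where cpufreq_calc_scale() relies on max having been pre-shifted by SCHED_FREQSCALE_SHIFT. A userspace sketch of both (the frequency values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HMP_VARIABLE_SCALE_SHIFT 16
#define SCHED_FREQSCALE_SHIFT	 10
#define LOAD_AVG_PERIOD		 32

/* 64-bit delta times a 16.16 fixed-point multiplier, done in halves. */
static uint64_t variable_scale_convert(uint64_t delta, uint32_t mult)
{
	uint64_t high = delta >> 32;
	uint64_t low = delta & 0xffffffffULL;

	low *= mult;
	high *= mult;
	return (low >> HMP_VARIABLE_SCALE_SHIFT) +
	       (high << (32 - HMP_VARIABLE_SCALE_SHIFT));
}

int main(void)
{
	/* doubling the period halves the multiplier, i.e. time runs
	 * half as fast for the load tracker */
	uint32_t mult = (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / 64;
	uint64_t delta = 2048;		/* us of runnable time */

	/* curr_scale as cpufreq_calc_scale() computes it: max is
	 * pre-shifted, so curr/max lands in [0..1024] */
	uint32_t max = 1000000 >> SCHED_FREQSCALE_SHIFT;	/* 1 GHz in kHz */
	uint32_t curr_scale = 500000 / max;			/* 500 MHz */

	printf("delta %llu -> %llu (period scaling)\n",
	       (unsigned long long)delta,
	       (unsigned long long)variable_scale_convert(delta, mult));
	printf("contrib 1024 -> %u (freq scaling)\n",
	       (1024 * curr_scale) >> SCHED_FREQSCALE_SHIFT);
	return 0;
}
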
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cc03cfdf469..5affd9cf0a0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -115,7 +115,7 @@ struct task_group {
atomic_t load_weight;
atomic64_t load_avg;
- atomic_t runnable_avg;
+ atomic_t runnable_avg, usage_avg;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
@@ -245,7 +245,7 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */
/* These always depend on CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
- u32 tg_runnable_contrib;
+ u32 tg_runnable_contrib, tg_usage_contrib;
u64 tg_load_contrib;
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -427,6 +427,9 @@ struct rq {
int active_balance;
int push_cpu;
struct cpu_stop_work active_balance_work;
+#ifdef CONFIG_SCHED_HMP
+ struct task_struct *migrate_task;
+#endif
/* cpu of this runqueue: */
int cpu;
int online;
@@ -549,6 +552,12 @@ DECLARE_PER_CPU(int, sd_llc_id);
extern int group_balance_cpu(struct sched_group *sg);
+#ifdef CONFIG_SCHED_HMP
+static LIST_HEAD(hmp_domains);
+DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
+#define hmp_cpu_domain(cpu) (per_cpu(hmp_cpu_domain, (cpu)))
+#endif /* CONFIG_SCHED_HMP */
+
#endif /* CONFIG_SMP */
#include "stats.h"
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 14d7758074a..d93dcb169ab 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,8 +195,12 @@ void local_bh_enable_ip(unsigned long ip)
EXPORT_SYMBOL(local_bh_enable_ip);
/*
- * We restart softirq processing for at most 2 ms,
- * and if need_resched() is not set.
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+ * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
+ * certain cases, such as stop_machine(), jiffies may cease to
+ * increment and so we need the MAX_SOFTIRQ_RESTART limit as
+ * well to make sure we eventually return from this method.
*
* These limits have been established via experimentation.
* The two things to balance is latency against fairness -
@@ -204,6 +208,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
* should not be able to lock up the box.
*/
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
+#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
@@ -212,6 +217,7 @@ asmlinkage void __do_softirq(void)
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
int cpu;
unsigned long old_flags = current->flags;
+ int max_restart = MAX_SOFTIRQ_RESTART;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -265,7 +271,8 @@ restart:
pending = local_softirq_pending();
if (pending) {
- if (time_before(jiffies, end) && !need_resched())
+ if (time_before(jiffies, end) && !need_resched() &&
+ --max_restart)
goto restart;
wakeup_softirqd();
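
The dual bound introduced above (time limit plus restart count) is a generally useful loop pattern. A sketch in plain C, with stand-in workload functions (work_pending/do_work are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_TIME_MS 2
#define MAX_RESTART 10

static int pending = 1000;			/* stand-in workload */
static bool work_pending(void) { return pending > 0; }
static void do_work(void) { pending--; }

/*
 * Cap the restart loop by elapsed time *and* by an iteration count, so
 * it still terminates if the clock stops advancing (the stop_machine()
 * case in the comment above).
 */
int main(void)
{
	struct timespec start, now;
	int max_restart = MAX_RESTART;
	long ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
restart:
	do_work();
	if (work_pending()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		ms = (now.tv_sec - start.tv_sec) * 1000 +
		     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms < MAX_TIME_MS && --max_restart)
			goto restart;
		puts("deferring rest to softirqd");	/* wakeup_softirqd() */
	}
	return 0;
}
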
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 24510d84efd..b69692250af 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
config ARCH_CLOCKSOURCE_DATA
bool
-# Platforms has a persistent clock
-config ALWAYS_USE_PERSISTENT_CLOCK
- bool
- default n
-
# Timekeeping vsyscall support
config GENERIC_TIME_VSYSCALL
bool
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 7f32fe0e52c..90ad470ee8d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -67,6 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
+ struct clock_event_device *cur = tick_broadcast_device.evtdev;
+
if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
(tick_broadcast_device.evtdev &&
tick_broadcast_device.evtdev->rating >= dev->rating) ||
@@ -74,6 +76,8 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
return 0;
clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
+ if (cur)
+ cur->event_handler = clockevents_handle_noop;
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b1600a6973f..7076b3f53e8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -323,6 +323,7 @@ static void tick_shutdown(unsigned int *cpup)
*/
dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_exchange_device(dev, NULL);
+ dev->event_handler = clockevents_handle_noop;
td->evtdev = NULL;
}
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a19a39952c1..e717ad9f141 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -904,7 +904,7 @@ void tick_cancel_sched_timer(int cpu)
hrtimer_cancel(&ts->sched_timer);
# endif
- ts->nohz_mode = NOHZ_MODE_INACTIVE;
+ memset(ts, 0, sizeof(*ts));
}
#endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9a0bc98fbe1..183df62de9e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -826,6 +826,14 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
+ /*
+ * On some systems the persistent_clock cannot be detected at
+ * timekeeping_init by its return value, so if we see a valid
+ * value returned, update the persistent_clock_exist flag.
+ */
+ if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
+ persistent_clock_exist = true;
+
write_seqlock_irqsave(&tk->lock, flags);
timekeeping_forward_now(tk);
timekeeping_suspended = 1;
diff --git a/kernel/timer.c b/kernel/timer.c
index dbf7a78a1ef..1b399c89e3a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1678,12 +1678,12 @@ static int __cpuinit init_timers_cpu(int cpu)
boot_done = 1;
base = &boot_tvec_bases;
}
+ spin_lock_init(&base->lock);
tvec_base_done[cpu] = 1;
} else {
base = per_cpu(tvec_bases, cpu);
}
- spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b3fde6d7b7f..0a0e2a6da4f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -676,7 +676,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
- for (i = 0; i < pages; i++) {
+ for (i = 1; i < pages; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
if (!pg->next)
goto out_free;
@@ -3736,7 +3736,8 @@ out:
if (fail)
return -EINVAL;
- ftrace_graph_filter_enabled = 1;
+ ftrace_graph_filter_enabled = !!(*idx);
+
return 0;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0308e572cda..0370adacb39 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5119,6 +5119,8 @@ static __init int tracer_init_debugfs(void)
trace_access_lock_init();
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
@@ -5260,36 +5262,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ static atomic_t dump_running;
unsigned int old_userobj;
- static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
- /* only one dump */
- local_irq_save(flags);
- arch_spin_lock(&ftrace_dump_lock);
- if (dump_ran)
- goto out;
-
- dump_ran = 1;
+ /* Only allow one dump user at a time. */
+ if (atomic_inc_return(&dump_running) != 1) {
+ atomic_dec(&dump_running);
+ return;
+ }
+ /*
+ * Always turn off tracing when we dump.
+ * We don't need to show trace output of what happens
+ * between multiple crashes.
+ *
+ * If the user does a sysrq-z, then they can re-enable
+ * tracing with echo 1 > tracing_on.
+ */
tracing_off();
- /* Did function tracer already get disabled? */
- if (ftrace_is_dead()) {
- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
- printk("# MAY BE MISSING FUNCTION EVENTS\n");
- }
-
- if (disable_tracing)
- ftrace_kill();
+ local_irq_save(flags);
/* Simulate the iterator */
trace_init_global_iter(&iter);
@@ -5319,6 +5317,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Did function tracer already get disabled? */
+ if (ftrace_is_dead()) {
+ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+ printk("# MAY BE MISSING FUNCTION EVENTS\n");
+ }
+
/*
* We need to stop all tracing on all CPUs to read
* the next buffer. This is a bit expensive, but is
@@ -5358,26 +5362,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");
out_enable:
- /* Re-enable tracing if requested */
- if (!disable_tracing) {
- trace_flags |= old_userobj;
+ trace_flags |= old_userobj;
- for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
- }
- tracing_on();
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
}
-
- out:
- arch_spin_unlock(&ftrace_dump_lock);
+ atomic_dec(&dump_running);
local_irq_restore(flags);
}
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
- __ftrace_dump(true, oops_dump_mode);
-}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
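
The atomic_inc_return() guard that replaces the old dump lock above is a lock-free "only one at a time" pattern: only the caller that moves the counter from 0 to 1 proceeds, everyone else backs off immediately. A userspace sketch using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

/* Only the caller that takes the counter 0 -> 1 may dump. */
static int try_dump(void)
{
	if (atomic_fetch_add(&dump_running, 1) != 0) {
		atomic_fetch_sub(&dump_running, 1);
		return 0;			/* another dump in flight */
	}
	puts("dumping buffers...");
	atomic_fetch_sub(&dump_running, 1);	/* let later dumps run */
	return 1;
}

int main(void)
{
	return !try_dump();
}
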
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e5b0ca8b8d4..5a8a53e67f8 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -777,7 +777,11 @@ static int filter_set_pred(struct event_filter *filter,
static void __free_preds(struct event_filter *filter)
{
+ int i;
+
if (filter->preds) {
+ for (i = 0; i < filter->n_preds; i++)
+ kfree(filter->preds[i].ops);
kfree(filter->preds);
filter->preds = NULL;
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 51c819c12c2..eedc2978a88 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -703,8 +703,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
@@ -714,8 +712,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
- if (ftrace_dump_on_oops)
- __ftrace_dump(false, DUMP_ALL);
+ if (ftrace_dump_on_oops) {
+ ftrace_dump(DUMP_ALL);
+ /* ftrace_dump() disables tracing */
+ tracing_on();
+ }
return 0;
}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 83a8b5b7bd3..b20428c5efe 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -20,13 +20,24 @@
#define STACK_TRACE_ENTRIES 500
+#ifdef CC_USING_FENTRY
+# define fentry 1
+#else
+# define fentry 0
+#endif
+
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+/*
+ * Reserve one entry for the passed in ip. This will allow
+ * us to remove most or all of the stack size overhead
+ * added by the stack tracer itself.
+ */
static struct stack_trace max_stack_trace = {
- .max_entries = STACK_TRACE_ENTRIES,
- .entries = stack_dump_trace,
+ .max_entries = STACK_TRACE_ENTRIES - 1,
+ .entries = &stack_dump_trace[1],
};
static unsigned long max_stack_size;
@@ -39,25 +50,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags;
unsigned long *p, *top, *start;
+ static int tracer_frame;
+ int frame_size = ACCESS_ONCE(tracer_frame);
int i;
- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size;
+ /* Remove the frame of the tracer */
+ this_size -= frame_size;
if (this_size <= max_stack_size)
return;
/* we do not handle interrupt stacks yet */
- if (!object_is_on_stack(&this_size))
+ if (!object_is_on_stack(stack))
return;
local_irq_save(flags);
arch_spin_lock(&max_stack_lock);
+ /* In case another CPU set the tracer_frame on us */
+ if (unlikely(!frame_size))
+ this_size -= tracer_frame;
+
/* a race could have already updated it */
if (this_size <= max_stack_size)
goto out;
@@ -70,10 +90,18 @@ static inline void check_stack(void)
save_stack_trace(&max_stack_trace);
/*
+ * Add the passed in ip from the function tracer.
+ * Searching for this on the stack will skip over
+ * most of the overhead from the stack tracer itself.
+ */
+ stack_dump_trace[0] = ip;
+ max_stack_trace.nr_entries++;
+
+ /*
* Now find where in the stack these are.
*/
i = 0;
- start = &this_size;
+ start = stack;
top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -97,6 +125,18 @@ static inline void check_stack(void)
found = 1;
/* Start the search from here */
start = p + 1;
+ /*
+ * We do not want to show the overhead
+ * of the stack tracer stack in the
+ * max stack. If we haven't figured
+ * out what that is, then figure it out
+ * now.
+ */
+ if (unlikely(!tracer_frame) && i == 1) {
+ tracer_frame = (p - stack) *
+ sizeof(unsigned long);
+ max_stack_size -= tracer_frame;
+ }
}
}
@@ -113,6 +153,7 @@ static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
+ unsigned long stack;
int cpu;
preempt_disable_notrace();
@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (per_cpu(trace_active, cpu)++ != 0)
goto out;
- check_stack();
+ /*
+ * When fentry is used, the traced function does not get
+ * its stack frame set up, and we lose the parent.
+ * The ip is pretty useless because the function tracer
+ * was called before that function set up its stack frame.
+ * In this case, we use the parent ip.
+ *
+ * By adding the return address of either the parent ip
+ * or the current ip we can disregard most of the stack usage
+ * caused by the stack tracer itself.
+ *
+ * The function tracer always reports the address of where the
+ * mcount call was, but the stack will hold the return address.
+ */
+ if (fentry)
+ ip = parent_ip;
+ else
+ ip += MCOUNT_INSN_SIZE;
+
+ check_stack(ip, &stack);
out:
per_cpu(trace_active, cpu)--;
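
The ip fixup above picks whichever address will actually appear on the stack as a return address: with fentry the callee has no frame yet, so the parent ip is searched for; with mcount the reported call-site address is advanced by MCOUNT_INSN_SIZE to reach the return address. Isolated as a plain function (the MCOUNT_INSN_SIZE value below is illustrative; the real constant is per-architecture):

#include <stdbool.h>
#include <stdio.h>

#define MCOUNT_INSN_SIZE 5	/* illustrative; arch-specific in reality */

static unsigned long stack_ip(bool fentry, unsigned long ip,
			      unsigned long parent_ip)
{
	if (fentry)
		return parent_ip;	/* callee frame not set up yet */
	return ip + MCOUNT_INSN_SIZE;	/* return addr after the call */
}

int main(void)
{
	printf("fentry: %#lx\n", stack_ip(true, 0x1000, 0x2000));
	printf("mcount: %#lx\n", stack_ip(false, 0x1000, 0x2000));
	return 0;
}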
@@ -371,6 +431,8 @@ static __init int stack_trace_init(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
trace_create_file("stack_max_size", 0644, d_tracer,
&max_stack_size, &stack_max_size_fops);
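
stack_trace_init() now bails out when tracing_init_dentry() returns NULL (e.g. debugfs unavailable) instead of registering files under a NULL dentry; the trace_stat.c hunk below adds the identical guard. The shape of that guard, modeled in userspace with hypothetical stand-ins get_dir()/create_file():

#include <stdio.h>

struct dentry;				/* opaque, pointer use only */

static struct dentry *get_dir(void)	/* like tracing_init_dentry() */
{
	return NULL;			/* e.g. debugfs not available */
}

static int init(void)
{
	struct dentry *d = get_dir();

	if (!d)				/* nothing to hang files under */
		return 0;
	/* create_file("stack_max_size", d, ...); */
	return 0;
}

int main(void)
{
	return init();
}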
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 96cffb269e7..847f88a6194 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
struct dentry *d_tracing;
d_tracing = tracing_init_dentry();
+ if (!d_tracing)
+ return 0;
stat_dir = debugfs_create_dir("trace_stat", d_tracing);
if (!stat_dir)
diff --git a/lib/klist.c b/lib/klist.c
index 0874e41609a..358a368a294 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -193,10 +193,10 @@ static void klist_release(struct kref *kref)
if (waiter->node != n)
continue;
+ list_del(&waiter->list);
waiter->woken = 1;
mb();
wake_up_process(waiter->process);
- list_del(&waiter->list);
}
spin_unlock(&klist_remove_lock);
knode_set_klist(n, NULL);
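
The klist hunk reorders the wakeup path: the waiter structure lives on the woken task's stack, so it must be unlinked before waiter->woken is published and wake_up_process() runs; once the waiter can return, touching its list linkage would be a use-after-free. The required ordering, reduced to a userspace sketch (simplified linkage, __sync_synchronize() standing in for mb()):

#include <stdio.h>

struct waiter {
	struct waiter *next, **pprev;	/* simplified list linkage */
	int woken;
};

static void release_waiter(struct waiter *w)
{
	*w->pprev = w->next;		/* 1) unlink while memory is valid */
	if (w->next)
		w->next->pprev = w->pprev;
	w->woken = 1;			/* 2) only then publish the wakeup */
	__sync_synchronize();		/* mb() stand-in */
	/* 3) wake_up_process(w->process) would run last */
}

int main(void)
{
	struct waiter *head = NULL;
	struct waiter w = { .next = NULL, .pprev = &head, .woken = 0 };

	head = &w;
	release_waiter(&w);
	printf("unlinked=%d woken=%d\n", head == NULL, w.woken);
	return 0;
}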
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index d8de11f4590..318f382a010 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/module.h>
#include <linux/export.h>
#include <linux/oid_registry.h>
#include <linux/kernel.h>
@@ -16,6 +17,10 @@
#include <linux/bug.h>
#include "oid_registry_data.c"
+MODULE_DESCRIPTION("OID Registry");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
/**
* look_up_OID - Find an OID registration for the specified data
* @data: Binary representation of the OID
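
The MODULE_* tags added above supply the metadata a modular build of oid_registry needs; MODULE_LICENSE("GPL") in particular keeps a modular build from tainting the kernel. For reference, the usual minimal shape of this boilerplate in a standalone module (a generic skeleton, not the oid_registry code, which needs no init/exit of its own):

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_DESCRIPTION("Demo skeleton");
MODULE_AUTHOR("Example Author");
MODULE_LICENSE("GPL");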
diff --git a/linaro/configs/android.conf b/linaro/configs/android.conf
new file mode 100644
index 00000000000..bb90ecd9e16
--- /dev/null
+++ b/linaro/configs/android.conf
@@ -0,0 +1,31 @@
+CONFIG_IPV6=y
+# CONFIG_IPV6_SIT is not set
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_HAS_WAKELOCK=y
+CONFIG_WAKELOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_DM_CRYPT=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_SWITCH=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_FUSE_FS=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
diff --git a/linaro/configs/big-LITTLE-IKS.conf b/linaro/configs/big-LITTLE-IKS.conf
new file mode 100644
index 00000000000..b067fde86ea
--- /dev/null
+++ b/linaro/configs/big-LITTLE-IKS.conf
@@ -0,0 +1,5 @@
+CONFIG_BIG_LITTLE=y
+CONFIG_BL_SWITCHER=y
+CONFIG_ARM_DT_BL_CPUFREQ=y
+CONFIG_ARM_VEXPRESS_BL_CPUFREQ=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf
new file mode 100644
index 00000000000..8cc2be049a4
--- /dev/null
+++ b/linaro/configs/big-LITTLE-MP.conf
@@ -0,0 +1,13 @@
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_NO_HZ=y
+CONFIG_SCHED_MC=y
+CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y
+CONFIG_SCHED_HMP=y
+CONFIG_HMP_FAST_CPU_MASK=""
+CONFIG_HMP_SLOW_CPU_MASK=""
+CONFIG_HMP_VARIABLE_SCALE=y
+CONFIG_HMP_FREQUENCY_INVARIANT_SCALE=y
+CONFIG_SCHED_HMP_PRIO_FILTER=y
+CONFIG_SCHED_HMP_PRIO_FILTER_VAL=5
diff --git a/linaro/configs/debug.conf b/linaro/configs/debug.conf
new file mode 100644
index 00000000000..36980566b2d
--- /dev/null
+++ b/linaro/configs/debug.conf
@@ -0,0 +1 @@
+CONFIG_PROVE_LOCKING=y
diff --git a/linaro/configs/linaro-base.conf b/linaro/configs/linaro-base.conf
new file mode 100644
index 00000000000..5c748a75dfe
--- /dev/null
+++ b/linaro/configs/linaro-base.conf
@@ -0,0 +1,88 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_HOTPLUG=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_THUMB2_KERNEL=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_IDLE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_LRO is not set
+CONFIG_NETFILTER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_NAND=y
+CONFIG_NETDEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_PROC_DEVICETREE=y
diff --git a/linaro/configs/ubuntu-minimal.conf b/linaro/configs/ubuntu-minimal.conf
new file mode 100644
index 00000000000..057ad013c28
--- /dev/null
+++ b/linaro/configs/ubuntu-minimal.conf
@@ -0,0 +1,26 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_CGROUPS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_SECCOMP=y
+CONFIG_CC_STACKPROTECTOR=y
+CONFIG_SYN_COOKIES=y
+CONFIG_IPV6=y
+CONFIG_NETLABEL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_DEVKMEM is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_STRICT_DEVMEM=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY_APPARMOR=y
diff --git a/linaro/configs/ubuntu.conf b/linaro/configs/ubuntu.conf
new file mode 100644
index 00000000000..b65be649bb1
--- /dev/null
+++ b/linaro/configs/ubuntu.conf
@@ -0,0 +1,2129 @@
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_INIT_PASS_ALL_PARAMS=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_KERNEL_GZIP=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_FHANDLE=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_CGROUPS is not set
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+CONFIG_RELAY=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PERF_EVENTS=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TRACEPOINTS=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KRETPROBES=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_EFI_PARTITION=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_CFQ=y
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+CONFIG_TICK_ONESHOT=y
+CONFIG_VMSPLIT_3G=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_HZ=128
+CONFIG_AEABI=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_CLEANCACHE=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+CONFIG_SECCOMP=y
+CONFIG_CC_STACKPROTECTOR=y
+CONFIG_USE_OF=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_OPP=y
+CONFIG_PM_CLK=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_IPCOMP=m
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_DEFAULT_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_TAB_BITS=12
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_SH_TAB_BITS=8
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_DECNET_NF_GRABULATOR=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+CONFIG_IP_DCCP_CCID3=y
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_NET_DCCPPROBE=m
+CONFIG_IP_SCTP=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_SCTP_HMAC_MD5=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_TIPC=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_TAG_DSA=y
+CONFIG_NET_DSA_TAG_EDSA=y
+CONFIG_NET_DSA_TAG_TRAILER=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_DECNET=m
+CONFIG_LLC=m
+CONFIG_LLC2=m
+CONFIG_IPX=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_WAN_ROUTER=m
+CONFIG_PHONET=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_SCH_FIFO=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_BATMAN_ADV=m
+CONFIG_OPENVSWITCH=m
+CONFIG_BQL=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+CONFIG_AX25_DAMA_SLAVE=y
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_BAYCOM_PAR=m
+CONFIG_BAYCOM_EPP=m
+CONFIG_YAM=m
+CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_GW=m
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_MCP251X=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_SJA1000_ISA=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_CC770=m
+CONFIG_CAN_CC770_ISA=m
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_ESD_USB2=m
+CONFIG_CAN_PEAK_USB=m
+CONFIG_CAN_SOFTING=m
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_ULTRA=y
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+CONFIG_IRDA_DEBUG=y
+CONFIG_IRTTY_SIR=m
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_TOIM3232_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+CONFIG_KINGSUN_DONGLE=m
+CONFIG_KSDAZZLE_DONGLE=m
+CONFIG_KS959_DONGLE=m
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_MCS_FIR=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_ATH3K=m
+CONFIG_BT_WILINK=m
+CONFIG_AF_RXRPC=m
+CONFIG_RXKAD=m
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_REGULATOR=m
+CONFIG_RFKILL_GPIO=m
+CONFIG_NET_9P=m
+CONFIG_CAIF=m
+CONFIG_CAIF_NETDEV=m
+CONFIG_CAIF_USB=m
+CONFIG_CEPH_LIB=m
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+CONFIG_NFC=m
+CONFIG_NFC_NCI=m
+CONFIG_PN544_NFC=m
+CONFIG_NFC_PN533=m
+CONFIG_NFC_WILINK=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_SPI=y
+CONFIG_DMA_SHARED_BUFFER=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+CONFIG_MTD_AFS_PARTS=m
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_AR7_PARTS=m
+CONFIG_HAVE_MTD_OTP=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+CONFIG_RFD_FTL=m
+CONFIG_SSFDC=m
+CONFIG_SM_FTL=m
+CONFIG_MTD_SWAP=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP_OF=m
+CONFIG_MTD_IMPA7=m
+CONFIG_MTD_GPIO_ADDR=m
+CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_LATCH_ADDR=m
+CONFIG_MTD_DATAFLASH=m
+CONFIG_MTD_DATAFLASH_OTP=y
+CONFIG_MTD_M25P80=m
+CONFIG_M25PXX_USE_FAST_READ=y
+CONFIG_MTD_SST25L=m
+CONFIG_MTD_SLRAM=m
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+CONFIG_MTD_BLOCK2MTD=m
+CONFIG_MTD_DOC2000=m
+CONFIG_MTD_DOC2001=m
+CONFIG_MTD_DOC2001PLUS=m
+CONFIG_MTD_DOCG3=m
+CONFIG_BCH_CONST_M=14
+CONFIG_BCH_CONST_T=4
+CONFIG_MTD_ONENAND=m
+CONFIG_MTD_DOCPROBE=m
+CONFIG_MTD_DOCECC=m
+CONFIG_MTD_DOCPROBE_ADDRESS=0x0
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_BCH=y
+CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_NAND_GPIO=m
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_DISKONCHIP=m
+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
+CONFIG_MTD_NAND_DOCG4=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_ALAUDA=m
+CONFIG_MTD_ONENAND_GENERIC=m
+CONFIG_MTD_ONENAND_2X_PROGRAM=y
+CONFIG_MTD_ONENAND_SIM=m
+CONFIG_MTD_LPDDR=m
+CONFIG_MTD_QINFO_PROBE=m
+CONFIG_DTC=y
+CONFIG_OF=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_I2C=y
+CONFIG_OF_NET=y
+CONFIG_OF_SPI=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_MTD=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_AX88796=m
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_UB=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+CONFIG_ATA_OVER_ETH=m
+CONFIG_MG_DISK=m
+CONFIG_MG_DISK_RES=0
+CONFIG_BLK_DEV_RBD=m
+CONFIG_SENSORS_LIS3LV02D=m
+CONFIG_AD525X_DPOT=m
+CONFIG_AD525X_DPOT_I2C=m
+CONFIG_AD525X_DPOT_SPI=m
+CONFIG_ICS932S401=m
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1780=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+CONFIG_HMC6352=m
+CONFIG_DS1682=m
+CONFIG_USB_SWITCH_FSA9480=m
+CONFIG_C2PORT=m
+CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT25=m
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93XX46=m
+CONFIG_IWMC3200TOP=m
+CONFIG_SENSORS_LIS3_SPI=m
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=m
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_WAIT_SCAN=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_TGT_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_DM_BUFIO=m
+CONFIG_DM_PERSISTENT_DATA=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_TCM_FC=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_MII=y
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_ATM_DRIVERS=y
+CONFIG_ATM_DUMMY=m
+CONFIG_ATM_TCP=m
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_SPI_SLAVE=m
+CONFIG_CAIF_HSI=m
+CONFIG_ETHERNET=y
+CONFIG_B44=m
+CONFIG_CS89x0=m
+CONFIG_CS89x0_PLATFORM=y
+CONFIG_DM9000=m
+CONFIG_DNET=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_GPIO=m
+CONFIG_PLIP=m
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=y
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_CDC_PHONET=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_WLAN=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_LIBERTAS_THINFIRM_USB=m
+CONFIG_AT76C50X_USB=m
+CONFIG_USB_ZD1201=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_ATH_COMMON=m
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_DEBUGFS=y
+CONFIG_ATH9K_RATE_CONTROL=y
+CONFIG_ATH9K_HTC=m
+CONFIG_ATH9K_HTC_DEBUGFS=y
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
+CONFIG_CARL9170_HWRNG=y
+CONFIG_B43=m
+CONFIG_B43_BCMA=y
+# CONFIG_B43_BCMA_EXTRA is not set
+CONFIG_B43_SSB=y
+CONFIG_B43_BCMA_PIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_N=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_PHY_HT=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_IWM=m
+CONFIG_IWM_TRACING=y
+CONFIG_LIBERTAS_SPI=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_SPI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTL8192C_COMMON=m
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL12XX_MENU=m
+CONFIG_WL12XX=m
+CONFIG_WL12XX_SPI=m
+CONFIG_WL12XX_SDIO=m
+CONFIG_WL12XX_PLATFORM_DATA=y
+CONFIG_ZD1211RW=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_RAW_ETH=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_HDLC_X25=m
+CONFIG_DLCI=m
+CONFIG_DLCI_MAX=8
+CONFIG_WAN_ROUTER_DRIVERS=m
+CONFIG_LAPBETHER=m
+CONFIG_ISDN=y
+CONFIG_ISDN_I4L=m
+CONFIG_ISDN_PPP=y
+CONFIG_ISDN_PPP_VJ=y
+CONFIG_ISDN_MPP=y
+CONFIG_IPPP_FILTER=y
+CONFIG_ISDN_PPP_BSDCOMP=m
+CONFIG_ISDN_AUDIO=y
+CONFIG_ISDN_TTY_FAX=y
+CONFIG_ISDN_X25=y
+CONFIG_ISDN_DIVERSION=m
+CONFIG_ISDN_DRV_HISAX=m
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIDRV=m
+CONFIG_CAPI_AVM=y
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DRV_GIGASET=m
+CONFIG_GIGASET_I4L=y
+CONFIG_GIGASET_BASE=m
+CONFIG_GIGASET_M105=m
+CONFIG_GIGASET_M101=m
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_EVBUG=m
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ADP5588=m
+CONFIG_KEYBOARD_ADP5589=m
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_QT1070=m
+CONFIG_KEYBOARD_LKKBD=m
+CONFIG_KEYBOARD_TCA6416=m
+CONFIG_KEYBOARD_TCA8418=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_LM8323=m
+CONFIG_KEYBOARD_MAX7359=m
+CONFIG_KEYBOARD_MCS=m
+CONFIG_KEYBOARD_MPR121=m
+CONFIG_KEYBOARD_NEWTON=m
+CONFIG_KEYBOARD_OPENCORES=m
+CONFIG_KEYBOARD_SAMSUNG=m
+CONFIG_KEYBOARD_STOWAWAY=m
+CONFIG_KEYBOARD_SUNKBD=m
+CONFIG_KEYBOARD_STMPE=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_GPIO=m
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_ANALOG=m
+CONFIG_JOYSTICK_INTERACT=m
+CONFIG_JOYSTICK_SIDEWINDER=m
+CONFIG_JOYSTICK_WARRIOR=m
+CONFIG_JOYSTICK_MAGELLAN=m
+CONFIG_JOYSTICK_GAMECON=m
+CONFIG_JOYSTICK_TURBOGRAFX=m
+CONFIG_JOYSTICK_JOYDUMP=m
+CONFIG_JOYSTICK_XPAD=m
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_JOYSTICK_WALKERA0701=m
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_GTCO=m
+CONFIG_TABLET_USB_HANWANG=m
+CONFIG_TABLET_USB_KBTAB=m
+CONFIG_TABLET_USB_WACOM=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_88PM860X_ONKEY=m
+CONFIG_INPUT_AD714X=m
+CONFIG_INPUT_AD714X_I2C=m
+CONFIG_INPUT_AD714X_SPI=m
+CONFIG_INPUT_BMA150=m
+CONFIG_INPUT_MMA8450=m
+CONFIG_INPUT_MPU3050=m
+CONFIG_INPUT_GP2A=m
+CONFIG_INPUT_GPIO_TILT_POLLED=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_KXTJ9=m
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_TWL4030_VIBRA=m
+CONFIG_INPUT_TWL6040_VIBRA=m
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_INPUT_ADXL34X=m
+CONFIG_INPUT_ADXL34X_I2C=m
+CONFIG_INPUT_ADXL34X_SPI=m
+CONFIG_INPUT_CMA3000=m
+CONFIG_INPUT_CMA3000_I2C=m
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_PS2MULT=m
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+CONFIG_TRACE_ROUTER=m
+CONFIG_TRACE_SINK=m
+CONFIG_STALDRV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=32
+CONFIG_SERIAL_8250_DW=m
+CONFIG_SERIAL_MAX3100=m
+CONFIG_SERIAL_MAX3107=m
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+CONFIG_SERIAL_OF_PLATFORM=m
+CONFIG_SERIAL_TIMBERDALE=m
+CONFIG_SERIAL_ALTERA_JTAGUART=m
+CONFIG_SERIAL_ALTERA_UART=m
+CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
+CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
+CONFIG_SERIAL_XILINX_PS_UART=m
+CONFIG_TTY_PRINTK=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_DCC=y
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_NVRAM=m
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=256
+CONFIG_RAMOOPS=m
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_MUX=m
+CONFIG_I2C_MUX_GPIO=m
+CONFIG_I2C_MUX_PCA9541=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCF=m
+CONFIG_I2C_ALGOPCA=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+CONFIG_I2C_GPIO=m
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_XILINX=m
+CONFIG_I2C_DIOLAN_U2C=m
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_STUB=m
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_BUTTERFLY=m
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_LM70_LLP=m
+CONFIG_SPI_OC_TINY=m
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_TLE62X0=m
+CONFIG_HSI=m
+CONFIG_HSI_BOARDINFO=y
+CONFIG_HSI_CHAR=m
+CONFIG_PPS=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC=m
+CONFIG_GPIO_GENERIC_PLATFORM=m
+CONFIG_POWER_SUPPLY=y
+CONFIG_TEST_POWER=m
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+CONFIG_SSB=m
+CONFIG_SSB_BLOCKIO=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_BLOCKIO=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_88PM860X=y
+CONFIG_MFD_SM501=m
+CONFIG_HTC_EGPIO=y
+CONFIG_HTC_PASIC3=m
+CONFIG_HTC_I2CPLD=y
+CONFIG_MFD_STMPE=y
+CONFIG_STMPE_I2C=y
+CONFIG_STMPE_SPI=y
+CONFIG_MFD_WL1273_CORE=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_DVB_CORE=m
+CONFIG_DVB_NET=y
+CONFIG_VIDEO_MEDIA=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_VIDEO_DEV=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=m
+CONFIG_LIRC_SERIAL_TRANSMITTER=y
+CONFIG_RC_MAP=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_LIRC_CODEC=m
+CONFIG_RC_ATI_REMOTE=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_STREAMZAP=m
+CONFIG_RC_LOOPBACK=m
+CONFIG_IR_GPIO_CIR=m
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+CONFIG_VIDEO_IR_I2C=m
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=y
+CONFIG_VIDEO_TLG2300=m
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_TM6000=m
+CONFIG_VIDEO_TM6000_ALSA=m
+CONFIG_VIDEO_TM6000_DVB=m
+CONFIG_VIDEO_USBVISION=m
+CONFIG_USB_PWC=m
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_VIDEO_CPIA2=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+CONFIG_V4L_ISA_PARPORT_DRIVERS=y
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_W9966=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_TIMBERDALE=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_SH_MOBILE_CSI2=m
+CONFIG_VIDEO_SH_MOBILE_CEU=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_TESTDEV=m
+CONFIG_RADIO_ADAPTERS=y
+CONFIG_RADIO_SI470X=y
+CONFIG_USB_SI470X=m
+CONFIG_I2C_SI470X=m
+CONFIG_USB_MR800=m
+CONFIG_USB_DSBR=m
+CONFIG_I2C_SI4713=m
+CONFIG_RADIO_SI4713=m
+CONFIG_USB_KEENE=m
+CONFIG_RADIO_WL1273=m
+CONFIG_RADIO_WL128X=m
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
+CONFIG_DVB_CAPTURE_DRIVERS=y
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_USB=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_SMS_SDIO_DRV=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+CONFIG_DVB_FE_CUSTOMISE=y
+CONFIG_DVB_PLL=m
+CONFIG_DRM=m
+CONFIG_DRM_USB=m
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+CONFIG_DRM_UDL=m
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB_SYS_FILLRECT=m
+CONFIG_FB_SYS_COPYAREA=m
+CONFIG_FB_SYS_IMAGEBLIT=m
+CONFIG_FB_SYS_FOPS=m
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_UVESA=m
+CONFIG_FB_S1D13XXX=m
+CONFIG_FB_TMIO=m
+CONFIG_FB_TMIO_ACCELL=y
+CONFIG_FB_SM501=m
+CONFIG_FB_SMSCUFX=m
+CONFIG_FB_UDL=m
+CONFIG_FB_METRONOME=m
+CONFIG_FB_BROADSHEET=m
+CONFIG_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_PANEL_SHARP_LS037V7DW01=y
+CONFIG_PANEL_NEC_NL8048HL11_01B=m
+CONFIG_PANEL_PICODLP=m
+CONFIG_PANEL_TPO_TD043MTEA1=y
+CONFIG_LCD_L4F00242T03=m
+CONFIG_LCD_LMS283GF05=m
+CONFIG_LCD_LTV350QV=m
+CONFIG_LCD_ILI9320=m
+CONFIG_LCD_TDO24M=m
+CONFIG_LCD_VGG2432A4=m
+CONFIG_LCD_S6E63M0=m
+CONFIG_LCD_LD9040=m
+CONFIG_LCD_AMS369FG06=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_ATMEL_PWM=m
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MTS64=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_PORTMAN2X4=m
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_ACRUX=m
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_HOLTEK=m
+CONFIG_HOLTEK_FF=y
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PRIMAX=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SPEEDLINK=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_WACOM=m
+CONFIG_HID_WACOM_POWER_SUPPLY=y
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_WIIMOTE_EXT=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_HID_ZYDACRON=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_OTG=y
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_C67X00_HCD=m
+CONFIG_USB_OXU210HP_HCD=m
+CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+CONFIG_USB_SL811_HCD_ISO=y
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_TMC=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_USS720=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_DEBUG=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_IOWARRIOR=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_YUREX=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+CONFIG_USB_ZERO=m
+CONFIG_USB_AUDIO=m
+CONFIG_GADGET_UAC1=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_G_NCM=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_ETH=y
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+CONFIG_USB_FUNCTIONFS_GENERIC=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_USB_G_PRINTER=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_NOKIA=m
+CONFIG_USB_G_ACM_MS=m
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+CONFIG_USB_G_HID=m
+CONFIG_USB_G_DBGP=m
+CONFIG_USB_G_DBGP_SERIAL=y
+CONFIG_USB_G_WEBCAM=m
+CONFIG_USB_OTG_UTILS=y
+CONFIG_USB_GPIO_VBUS=y
+CONFIG_USB_ULPI=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_SDHCI_PXAV3=m
+CONFIG_MMC_SDHCI_PXAV2=m
+CONFIG_MMC_SPI=m
+CONFIG_MMC_TMIO_CORE=m
+CONFIG_MMC_TMIO=m
+CONFIG_MMC_DW=m
+CONFIG_MMC_DW_PLTFM=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+CONFIG_MEMSTICK=m
+CONFIG_MSPRO_BLOCK=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_PCA9532=m
+CONFIG_LEDS_PCA9532_GPIO=y
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_LP5521=m
+CONFIG_LEDS_LP5523=m
+CONFIG_LEDS_PWM=m
+CONFIG_LEDS_REGULATOR=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=m
+CONFIG_TIMB_DMA=m
+CONFIG_DMA_ENGINE=y
+CONFIG_NET_DMA=y
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_AUXDISPLAY=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_STAGING=y
+CONFIG_USBIP_CORE=m
+CONFIG_USBIP_VHCI_HCD=m
+CONFIG_USBIP_HOST=m
+CONFIG_W35UND=m
+CONFIG_PRISM2_USB=m
+CONFIG_ECHO=m
+CONFIG_ASUS_OLED=m
+CONFIG_PANEL=m
+CONFIG_PANEL_PARPORT=0
+CONFIG_PANEL_PROFILE=5
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
+CONFIG_R8712U=m
+CONFIG_RTS5139=m
+CONFIG_TRANZPORT=m
+CONFIG_LINE6_USB=m
+CONFIG_USB_SERIAL_QUATECH2=m
+CONFIG_USB_SERIAL_QUATECH_USB2=m
+CONFIG_IIO=m
+CONFIG_IIO_ST_HWMON=m
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_SW_RING=m
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+CONFIG_KXSD9=m
+CONFIG_SCA3000=m
+CONFIG_IIO_PERIODIC_RTC_TRIGGER=m
+CONFIG_IIO_GPIO_TRIGGER=m
+CONFIG_IIO_SYSFS_TRIGGER=m
+CONFIG_IIO_SIMPLE_DUMMY=m
+CONFIG_FB_SM7XX=m
+CONFIG_USB_ENESTORAGE=m
+CONFIG_BCM_WIMAX=m
+CONFIG_FT1000=m
+CONFIG_FT1000_USB=m
+CONFIG_SPEAKUP=m
+CONFIG_SPEAKUP_SYNTH_SPKOUT=m
+CONFIG_SPEAKUP_SYNTH_TXPRT=m
+CONFIG_SPEAKUP_SYNTH_DUMMY=m
+CONFIG_STAGING_MEDIA=y
+CONFIG_DVB_AS102=m
+CONFIG_EASYCAP=m
+CONFIG_LIRC_STAGING=y
+CONFIG_LIRC_IGORPLUGUSB=m
+CONFIG_LIRC_IMON=m
+CONFIG_LIRC_PARALLEL=m
+CONFIG_LIRC_SASEM=m
+CONFIG_LIRC_SERIAL=m
+CONFIG_LIRC_SIR=m
+CONFIG_LIRC_TTUSBIR=m
+CONFIG_LIRC_ZILOG=m
+CONFIG_PHONE=m
+CONFIG_USB_WPAN_HCD=m
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD=y
+CONFIG_JBD2=y
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+CONFIG_NILFS2_FS=m
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_QFMT_V1=m
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
+CONFIG_GENERIC_ACL=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+CONFIG_FAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+CONFIG_PROC_FS=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=y
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_CEPH_FS=m
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NCP_FS=m
+CONFIG_NCPFS_PACKET_SIGNING=y
+CONFIG_NCPFS_IOCTL_LOCKING=y
+CONFIG_NCPFS_STRONG=y
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+CONFIG_NCPFS_NLS=y
+CONFIG_NCPFS_EXTRAS=y
+# CONFIG_CODA_FS is not set
+CONFIG_AFS_FS=m
+CONFIG_9P_FS=m
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_EVENT_POWER_TRACING_DEPRECATED=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_BRANCH_PROFILE_NONE=y
+CONFIG_KPROBE_EVENT=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_KGDB=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_KEYBOARD=y
+CONFIG_TEST_KSTRTOX=m
+CONFIG_STRICT_DEVMEM=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_NONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_ENCRYPTED_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_TOMOYO=y
+CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
+CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
+CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
+CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_YAMA=y
+CONFIG_INTEGRITY=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_EVM=y
+CONFIG_DEFAULT_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY="apparmor"
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+CONFIG_RAID6_PQ=m
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC16=y
+CONFIG_CRC32=y
+CONFIG_CRC32_SLICEBY8=y
+CONFIG_CRC8=m
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_XZ_DEC_TEST=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_BCH=y
+CONFIG_BCH_CONST_PARAMS=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_LRU_CACHE=m
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+CONFIG_CORDIC=m
+CONFIG_MPILIB=y
+CONFIG_SIGNATURE=y
diff --git a/linaro/configs/vexpress-tuning.conf b/linaro/configs/vexpress-tuning.conf
new file mode 100644
index 00000000000..adea6cc66de
--- /dev/null
+++ b/linaro/configs/vexpress-tuning.conf
@@ -0,0 +1 @@
+# CONFIG_PROVE_LOCKING is not set
diff --git a/linaro/configs/vexpress.conf b/linaro/configs/vexpress.conf
new file mode 100644
index 00000000000..94ed8d8729b
--- /dev/null
+++ b/linaro/configs/vexpress.conf
@@ -0,0 +1,59 @@
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_BIG_LITTLE=y
+CONFIG_ARCH_VEXPRESS_TC2=y
+CONFIG_ARCH_VEXPRESS_DCSCB=y
+CONFIG_ARM_VEXPRESS_BL_CPUFREQ=y
+CONFIG_ARM_PSCI=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_NR_CPUS=8
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+CONFIG_CMDLINE="console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait mmci.fmax=4000000"
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SMSC911X=y
+CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_ARMHDLCD=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_ARMAACI=y
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_SENSORS_VEXPRESS=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_VEXPRESS=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e2f7f5aaaaf..a4510d49427 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2318,7 +2318,12 @@ static void collapse_huge_page(struct mm_struct *mm,
pte_unmap(pte);
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
- set_pmd_at(mm, address, pmd, _pmd);
+ /*
+ * We can only use set_pmd_at when establishing
+ * hugepmds and never for establishing regular pmds that
+ * point to regular pagetables. Use pmd_populate for that
+ */
+ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
spin_unlock(&mm->page_table_lock);
anon_vma_unlock_write(vma->anon_vma);
goto out;
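
A minimal sketch of the rule the new comment encodes, with an illustrative helper name: when collapse fails, the saved entry still refers to a regular page table, so it must be reinstalled with pmd_populate() rather than set_pmd_at(), which is only for huge entries.

static void reinstall_pagetable(struct mm_struct *mm, pmd_t *pmd, pmd_t _pmd)
{
	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	/* _pmd holds a pointer to a regular page table, not a huge page */
	pmd_populate(mm, pmd, pmd_pgtable(_pmd));
	spin_unlock(&mm->page_table_lock);
}
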
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2b552224f5c..9630d581ec0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3991,8 +3991,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
if (mem_cgroup_disabled())
return NULL;
- VM_BUG_ON(PageSwapCache(page));
-
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
@@ -4088,6 +4086,18 @@ void mem_cgroup_uncharge_page(struct page *page)
if (page_mapped(page))
return;
VM_BUG_ON(page->mapping && !PageAnon(page));
+ /*
+ * If the page is in swap cache, uncharge should be deferred
+ * to the swap path, which also properly accounts swap usage
+ * and handles memcg lifetime.
+ *
+ * Note that this check is not stable and reclaim may add the
+ * page to swap cache at any time after this. However, if the
+ * page is not in swap cache by the time page->mapcount hits
+ * 0, there won't be any page table references to the swap
+ * slot, and reclaim will free it and not actually write the
+ * page to disk.
+ */
if (PageSwapCache(page))
return;
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
diff --git a/mm/migrate.c b/mm/migrate.c
index 3bbaf5d230b..22ed5c165eb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
pte = arch_make_huge_pte(pte, vma, new, 0);
}
#endif
- flush_cache_page(vma, addr, pte_pfn(pte));
+ flush_dcache_page(new);
set_pte_at(mm, addr, ptep, pte);
if (PageHuge(new)) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 0db0de1c2fb..0dceed852b5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1327,15 +1327,24 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
file = fget(fd);
if (!file)
goto out;
+ if (is_file_hugepages(file))
+ len = ALIGN(len, huge_page_size(hstate_file(file)));
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
+ struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
+ SHM_HUGE_MASK);
+
+ if (!hs)
+ return -EINVAL;
+
+ len = ALIGN(len, huge_page_size(hs));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called
* A dummy user value is used because we are not locking
* memory so no accounting is necessary
*/
- file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
&user, HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
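
Both mapping paths above now round the requested length up to the huge page size before any reservation math. A minimal sketch, assuming a 2 MiB hstate (the helper name is illustrative):

static unsigned long round_hugetlb_len(struct hstate *hs, unsigned long len)
{
	/* e.g. with 2 MiB huge pages, a 3 MiB request becomes 4 MiB */
	return ALIGN(len, huge_page_size(hs));
}
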
@@ -2305,7 +2314,7 @@ static void unmap_region(struct mm_struct *mm,
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : 0);
+ next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);
}
@@ -2685,7 +2694,7 @@ void exit_mmap(struct mm_struct *mm)
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
- free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+ free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, 0, -1);
/*
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index be04122fb27..6725ff18337 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm)
int id;
/*
- * srcu_read_lock() here will block synchronize_srcu() in
- * mmu_notifier_unregister() until all registered
- * ->release() callouts this function makes have
- * returned.
+ * SRCU here will block mmu_notifier_unregister until
+ * ->release returns.
*/
id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+ /*
+ * If ->release runs before mmu_notifier_unregister it must be
+ * handled, as it's the only way for the driver to flush all
+ * existing sptes and stop the driver from establishing any more
+ * sptes before all the pages in the mm are freed.
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ srcu_read_unlock(&srcu, id);
+
spin_lock(&mm->mmu_notifier_mm->lock);
while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
mn = hlist_entry(mm->mmu_notifier_mm->list.first,
struct mmu_notifier,
hlist);
-
/*
- * Unlink. This will prevent mmu_notifier_unregister()
- * from also making the ->release() callout.
+ * We arrived before mmu_notifier_unregister so
+ * mmu_notifier_unregister will do nothing other than to wait
+ * for ->release to finish and for mmu_notifier_unregister to
+ * return.
*/
hlist_del_init_rcu(&mn->hlist);
- spin_unlock(&mm->mmu_notifier_mm->lock);
-
- /*
- * Clear sptes. (see 'release' description in mmu_notifier.h)
- */
- if (mn->ops->release)
- mn->ops->release(mn, mm);
-
- spin_lock(&mm->mmu_notifier_mm->lock);
}
spin_unlock(&mm->mmu_notifier_mm->lock);
/*
- * All callouts to ->release() which we have done are complete.
- * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
- */
- srcu_read_unlock(&srcu, id);
-
- /*
- * mmu_notifier_unregister() may have unlinked a notifier and may
- * still be calling out to it. Additionally, other notifiers
- * may have been active via vmtruncate() et. al. Block here
- * to ensure that all notifier callouts for this mm have been
- * completed and the sptes are really cleaned up before returning
- * to exit_mmap().
+ * synchronize_srcu here prevents mmu_notifier_release from returning to
+ * exit_mmap (which would proceed with freeing all pages in the mm)
+ * until the ->release method returns, if it was invoked by
+ * mmu_notifier_unregister.
+ *
+ * The mmu_notifier_mm can't go away from under us because one mm_count
+ * is held by exit_mmap.
*/
synchronize_srcu(&srcu);
}
@@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
BUG_ON(atomic_read(&mm->mm_count) <= 0);
- spin_lock(&mm->mmu_notifier_mm->lock);
if (!hlist_unhashed(&mn->hlist)) {
+ /*
+ * SRCU here will force exit_mmap to wait for ->release to
+ * finish before freeing the pages.
+ */
int id;
+ id = srcu_read_lock(&srcu);
/*
- * Ensure we synchronize up with __mmu_notifier_release().
+ * exit_mmap will block in mmu_notifier_release to guarantee
+ * that ->release is called before freeing the pages.
*/
- id = srcu_read_lock(&srcu);
-
- hlist_del_rcu(&mn->hlist);
- spin_unlock(&mm->mmu_notifier_mm->lock);
-
if (mn->ops->release)
mn->ops->release(mn, mm);
+ srcu_read_unlock(&srcu, id);
+ spin_lock(&mm->mmu_notifier_mm->lock);
/*
- * Allow __mmu_notifier_release() to complete.
+ * Can not use list_del_rcu() since __mmu_notifier_release
+ * can delete it before we hold the lock.
*/
- srcu_read_unlock(&srcu, id);
- } else
+ hlist_del_init_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
+ }
/*
- * Wait for any running method to finish, including ->release() if it
- * was run by __mmu_notifier_release() instead of us.
+ * Wait for any running method to finish, of course including
+ * ->release if it was run by mmu_notifier_release instead of us.
*/
synchronize_srcu(&srcu);
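
A minimal sketch of the SRCU handoff the rewritten comments describe; both function names are illustrative. Whichever of __mmu_notifier_release() and mmu_notifier_unregister() reaches a notifier first calls ->release() inside an SRCU read-side section, and the other side only has to wait in synchronize_srcu():

static void call_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int id = srcu_read_lock(&srcu);
	if (mn->ops->release)
		mn->ops->release(mn, mm);	/* runs under SRCU protection */
	srcu_read_unlock(&srcu, id);
}

static void wait_for_release(void)
{
	/* returns only after every read-side section above has exited,
	 * i.e. after any in-flight ->release() has returned */
	synchronize_srcu(&srcu);
}
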
diff --git a/mm/page_io.c b/mm/page_io.c
index 78eee32ee48..61828703c4a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -214,6 +214,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
kiocb.ki_left = PAGE_SIZE;
kiocb.ki_nbytes = PAGE_SIZE;
+ set_page_writeback(page);
unlock_page(page);
ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
&kiocb, &iov,
@@ -222,7 +223,23 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
if (ret == PAGE_SIZE) {
count_vm_event(PSWPOUT);
ret = 0;
+ } else {
+ /*
+ * In the case of swap-over-nfs, this can be a
+ * temporary failure if the system has limited
+ * memory for allocating transmit buffers.
+ * Mark the page dirty and avoid
+ * rotate_reclaimable_page, and rate-limit the
+ * messages, but do not flag PageError as in
+ * the normal direct-to-bio case, since the
+ * failure could be temporary.
+ */
+ set_page_dirty(page);
+ ClearPageReclaim(page);
+ pr_err_ratelimited("Write error on dio swapfile (%Lu)\n",
+ page_file_offset(page));
}
+ end_page_writeback(page);
return ret;
}
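
A condensed sketch of the writeback bracketing after this hunk, with do_swap_dio() as a hypothetical stand-in for the ->direct_IO call; the key invariants are set_page_writeback() before unlock_page(), and end_page_writeback() on every exit path:

static int swap_writepage_sketch(struct page *page)
{
	int ret;

	set_page_writeback(page);	/* mark under writeback before dropping the lock */
	unlock_page(page);
	ret = do_swap_dio(page);	/* hypothetical stand-in for ->direct_IO */
	if (ret) {
		set_page_dirty(page);	/* keep it around for a later retry */
		ClearPageReclaim(page);	/* and don't let reclaim rotate it */
	}
	end_page_writeback(page);	/* always pairs with set_page_writeback() */
	return ret;
}
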
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 35aa294656c..5da2cbcfdbb 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
return 0;
}
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
- struct vm_area_struct *vma;
-
- /* We don't need vma lookup at all. */
- if (!walk->hugetlb_entry)
- return NULL;
-
- VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
- vma = find_vma(walk->mm, addr);
- if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
- return vma;
-
- return NULL;
-}
-
#else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
- return NULL;
-}
-
static int walk_hugetlb_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
if (!walk->mm)
return -EINVAL;
+ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
pgd = pgd_offset(walk->mm, addr);
do {
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = NULL;
next = pgd_addr_end(addr, end);
/*
- * handle hugetlb vma individually because pagetable walk for
- * the hugetlb page is dependent on the architecture and
- * we can't handled it in the same manner as non-huge pages.
+ * This function was not intended to be vma based.
+ * But there are vma special cases to be handled:
+ * - hugetlb vma's
+ * - VM_PFNMAP vma's
*/
- vma = hugetlb_vma(addr, walk);
+ vma = find_vma(walk->mm, addr);
if (vma) {
- if (vma->vm_end < next)
+ /*
+ * There are no page structures backing a VM_PFNMAP
+ * range, so do not allow split_huge_page_pmd().
+ */
+ if ((vma->vm_start <= addr) &&
+ (vma->vm_flags & VM_PFNMAP)) {
next = vma->vm_end;
+ pgd = pgd_offset(walk->mm, next);
+ continue;
+ }
/*
- * Hugepage is very tightly coupled with vma, so
- * walk through hugetlb entries within a given vma.
+ * Handle hugetlb vma individually because pagetable
+ * walk for the hugetlb page is dependent on the
+ * architecture and we can't handle it in the same
+ * manner as non-huge pages.
*/
- err = walk_hugetlb_range(vma, addr, next, walk);
- if (err)
- break;
- pgd = pgd_offset(walk->mm, next);
- continue;
+ if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+ is_vm_hugetlb_page(vma)) {
+ if (vma->vm_end < next)
+ next = vma->vm_end;
+ /*
+ * Hugepage is very tightly coupled with vma,
+ * so walk through hugetlb entries within a
+ * given vma.
+ */
+ err = walk_hugetlb_range(vma, addr, next, walk);
+ if (err)
+ break;
+ pgd = pgd_offset(walk->mm, next);
+ continue;
+ }
}
if (pgd_none_or_clear_bad(pgd)) {
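
With the hunk above, walk_page_range() now asserts that mmap_sem is held instead of checking it only in the hugetlb lookup. A hypothetical caller counting present PTEs would look like this:

static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	if (pte_present(*pte))
		(*(unsigned long *)walk->private)++;
	return 0;
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry = count_pte,
		.mm = mm,
		.private = &count,
	};

	down_read(&mm->mmap_sem);	/* required: the walk asserts this lock */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}
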
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 19cf81bf9f6..63bd98cea2d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,7 +627,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- u32 old_features = features;
+ netdev_features_t old_features = features;
features &= real_dev->vlan_features;
features |= NETIF_F_RXCSUM;
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c3530a81a33..950663d4d33 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
br_debug(br, "tcn timer expired\n");
spin_lock(&br->lock);
- if (br->dev->flags & IFF_UP) {
+ if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
br_transmit_tcn(br);
mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
diff --git a/net/core/dev.c b/net/core/dev.c
index b24ab0e98eb..9a278e93042 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2458,7 +2458,7 @@ EXPORT_SYMBOL(netif_skb_features);
* 2. skb is fragmented and the device does not support SG.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
- int features)
+ netdev_features_t features)
{
return skb_is_nonlinear(skb) &&
((skb_has_frag_list(skb) &&
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3e9b2c3e30f..41f4bdfa5e1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1416,7 +1416,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
void __user *useraddr = ifr->ifr_data;
u32 ethcmd;
int rc;
- u32 old_features;
+ netdev_features_t old_features;
if (!dev || !netif_device_present(dev))
return -ENODEV;
diff --git a/net/core/sock.c b/net/core/sock.c
index b261a797774..1432266c4ad 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1209,18 +1209,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
#endif
}
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
- if (offsetof(struct sock, sk_node.next) != 0)
- memset(sk, 0, offsetof(struct sock, sk_node.next));
- memset(&sk->sk_node.pprev, 0,
- size - offsetof(struct sock, sk_node.pprev));
-}
-
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
unsigned long nulls1, nulls2;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index f4fd23de9b1..3211914b1c6 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -257,6 +257,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
atomic_set(&q->refcnt, 1);
+ INIT_LIST_HEAD(&q->lru_list);
return q;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b83a49cc381..2f672e7ab6e 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -583,8 +583,13 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
*
* Note that even if there is new data in the SYN packet
* they will be thrown away too.
+ *
+ * Reset timer after retransmitting SYNACK, similar to
+ * the idea of fast retransmit in recovery.
*/
- inet_rtx_syn_ack(sk, req);
+ if (!inet_rtx_syn_ack(sk, req))
+ req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
+ TCP_RTO_MAX) + jiffies;
return NULL;
}
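
The rearmed timer backs off exponentially and is capped; assuming this tree's TCP_TIMEOUT_INIT of 1*HZ and TCP_RTO_MAX of 120*HZ, the schedule works out as below (helper name illustrative):

static unsigned long synack_rto(const struct request_sock *req)
{
	/* num_timeout 0, 1, 2, 3, ... -> 1s, 2s, 4s, 8s, ... capped at 120s */
	return min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
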
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e4efffe2522..95d13c76d31 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1135,6 +1135,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
}
if (t == NULL)
t = netdev_priv(dev);
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
@@ -1182,6 +1183,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
if (t) {
err = 0;
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 46a5be85be8..0fce928a9f7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1937,6 +1937,17 @@ void tcp6_proc_exit(struct net *net)
}
#endif
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
struct proto tcpv6_prot = {
.name = "TCPv6",
.owner = THIS_MODULE,
@@ -1980,6 +1991,7 @@ struct proto tcpv6_prot = {
#ifdef CONFIG_MEMCG_KMEM
.proto_cgroup = tcp_proto_cgroup,
#endif
+ .clear_sk = tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
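
The new helper clears everything in the socket except the pinet6 pointer, which concurrent RCU lookups may still dereference. An illustrative view of the layout (note the net/core/sock.c hunk above removes the static copy of sk_prot_clear_nulls(), presumably making a shared version visible to this file):

/*
 *   offset 0 ............................. cleared, nulls-aware
 *   offsetof(struct inet_sock, pinet6) ... pinet6 PRESERVED for RCU lookups
 *   end of pinet6 ........................ cleared with memset()
 *   size ................................. end of the object
 */
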
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d8e5e852fc7..27f0f8e50f0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1422,6 +1422,17 @@ void udp6_proc_exit(struct net *net) {
}
#endif /* CONFIG_PROC_FS */
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
@@ -1452,7 +1463,7 @@ struct proto udpv6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index d7571046bfc..4691ed50a92 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
extern void udpv6_destroy_sock(struct sock *sk);
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
#ifdef CONFIG_PROC_FS
extern int udp6_seq_show(struct seq_file *seq, void *v);
#endif
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 1d08e21d9f6..dfcc4be4689 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4ef7bdb6544..23ed03d786c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
dev_hold(dev);
xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
- if (!xdst->u.rt6.rt6i_idev)
+ if (!xdst->u.rt6.rt6i_idev) {
+ dev_put(dev);
return -ENODEV;
+ }
rt6_transfer_peer(&xdst->u.rt6, rt);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a6893602f87..843d8c492d4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1034,6 +1034,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
sta_info_flush_defer(vlan);
sta_info_flush_defer(sdata);
+ synchronize_net();
rcu_barrier();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
sta_info_flush_cleanup(vlan);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 9ed49ad0380..9cbebc2eb87 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -844,11 +844,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
*
* sta_info_flush_cleanup() requires rcu_barrier()
* first to wait for the station call_rcu() calls
- * to complete, here we need at least sychronize_rcu()
- * it to wait for the RX path in case it is using the
+ * to complete, and we also need synchronize_rcu()
+ * to wait for the RX path in case it is using the
* interface and enqueuing frames at this very time on
* another CPU.
*/
+ synchronize_rcu();
rcu_barrier();
sta_info_flush_cleanup(sdata);
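
The two grace-period primitives used above are not interchangeable, which is why the hunk adds one rather than replacing the other; illustratively:

synchronize_rcu();	/* waits for pre-existing read-side sections (the RX path) */
rcu_barrier();		/* waits for already-queued call_rcu() callbacks to finish */
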
@@ -1648,6 +1649,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
ASSERT_RTNL();
+ /*
+ * Close all AP_VLAN interfaces first, as otherwise they
+ * might be closed while the AP interface they belong to
+ * is closed, causing unregister_netdevice_many() to crash.
+ */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ dev_close(sdata->dev);
+
mutex_lock(&local->iflist_mtx);
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 346ad4cfb01..0a60f4047fc 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3182,10 +3182,6 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
if (WARN_ON_ONCE(!auth_data))
return -EINVAL;
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
- IEEE80211_TX_INTFL_MLME_CONN_TX;
-
auth_data->tries++;
if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -3219,6 +3215,10 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
auth_data->expected_transaction = trans;
}
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_INTFL_MLME_CONN_TX;
+
ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
auth_data->data, auth_data->data_len,
auth_data->bss->bssid,
@@ -3242,12 +3242,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
* will not answer to direct packet in unassociated state.
*/
ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
- NULL, 0, (u32) -1, true, tx_flags,
+ NULL, 0, (u32) -1, true, 0,
auth_data->bss->channel, false);
rcu_read_unlock();
}
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ if (tx_flags == 0) {
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
ifmgd->auth_data->timeout_started = true;
run_again(ifmgd, auth_data->timeout);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d0275f34bf7..835584ca992 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -53,8 +53,9 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
- /* flush out all packets */
+ /* flush out all packets and station cleanup call_rcu()s */
synchronize_net();
+ rcu_barrier();
drv_flush(local, false);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c6844ad080b..bb0b4577d0e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3032,6 +3032,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
* and location updates. Note that mac80211
* itself never looks at these frames.
*/
+ if (!multicast &&
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1))
+ return 0;
if (ieee80211_is_public_action(hdr, skb->len))
return 1;
if (!ieee80211_is_beacon(hdr->frame_control))
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 3ed801d90f1..124b1fdc20d 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -208,10 +208,10 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
u32 iv32 = get_unaligned_le32(&data[4]);
u16 iv16 = data[2] | (data[0] << 8);
- spin_lock_bh(&key->u.tkip.txlock);
+ spin_lock(&key->u.tkip.txlock);
ieee80211_compute_tkip_p1k(key, iv32);
tkip_mixing_phase2(tk, ctx, iv16, p2k);
- spin_unlock_bh(&key->u.tkip.txlock);
+ spin_unlock(&key->u.tkip.txlock);
}
EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index a4dcaf1dd4b..703c1210d22 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -90,7 +90,7 @@ struct mac802154_sub_if_data {
#define MAC802154_MAX_XMIT_ATTEMPTS 3
-#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
+#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 12475ef88da..e5920fb7ad0 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
if (ret > 0)
break;
if (!ret)
- return 0;
+ return -EINVAL;
dataoff += *matchoff;
}
- /* Empty callid is useless */
- if (!*matchlen)
- return -EINVAL;
-
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1d6793dbfba..f83e17249dd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -693,36 +693,33 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
smp_rmb();
- if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+ /* We could have just memset this but we will lose the
+ * flexibility of making the priv area sticky
+ */
- /* We could have just memset this but we will lose the
- * flexibility of making the priv area sticky
- */
- BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
- BLOCK_NUM_PKTS(pbd1) = 0;
- BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
- getnstimeofday(&ts);
- h1->ts_first_pkt.ts_sec = ts.tv_sec;
- h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
- pkc1->pkblk_start = (char *)pbd1;
- pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
- BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
- BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
- pbd1->version = pkc1->version;
- pkc1->prev = pkc1->nxt_offset;
- pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
- prb_thaw_queue(pkc1);
- _prb_refresh_rx_retire_blk_timer(pkc1);
+ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+ BLOCK_NUM_PKTS(pbd1) = 0;
+ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
- smp_wmb();
+ getnstimeofday(&ts);
- return;
- }
+ h1->ts_first_pkt.ts_sec = ts.tv_sec;
+ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
- WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
- pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
- dump_stack();
- BUG();
+ pkc1->pkblk_start = (char *)pbd1;
+ pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+
+ pbd1->version = pkc1->version;
+ pkc1->prev = pkc1->nxt_offset;
+ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+
+ prb_thaw_queue(pkc1);
+ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+ smp_wmb();
}
/*
@@ -813,10 +810,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
prb_close_block(pkc, pbd, po, status);
return;
}
-
- WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
- dump_stack();
- BUG();
}
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index e0f6de64afe..60d88b6b956 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Copyright: Jamal Hadi Salim (2002-4)
+ * Copyright: Jamal Hadi Salim (2002-13)
*/
#include <linux/types.h>
@@ -303,17 +303,44 @@ static struct tc_action_ops act_ipt_ops = {
.walk = tcf_generic_walker
};
-MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
+static struct tc_action_ops act_xt_ops = {
+ .kind = "xt",
+ .hinfo = &ipt_hash_info,
+ .type = TCA_ACT_IPT,
+ .capab = TCA_CAP_NONE,
+ .owner = THIS_MODULE,
+ .act = tcf_ipt,
+ .dump = tcf_ipt_dump,
+ .cleanup = tcf_ipt_cleanup,
+ .lookup = tcf_hash_search,
+ .init = tcf_ipt_init,
+ .walk = tcf_generic_walker
+};
+
+MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("act_xt");
static int __init ipt_init_module(void)
{
- return tcf_register_action(&act_ipt_ops);
+ int ret1, ret2;
+ ret1 = tcf_register_action(&act_xt_ops);
+ if (ret1 < 0)
+ printk("Failed to load xt action\n");
+ ret2 = tcf_register_action(&act_ipt_ops);
+ if (ret2 < 0)
+ printk("Failed to load ipt action\n");
+
+ if (ret1 < 0 && ret2 < 0)
+ return ret1;
+ else
+ return 0;
}
static void __exit ipt_cleanup_module(void)
{
+ tcf_unregister_action(&act_xt_ops);
tcf_unregister_action(&act_ipt_ops);
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8529fc8e54..5356b120dbf 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
* Note: If the task is ASYNC, and is being made runnable after sitting on an
* rpc_wait_queue, this must be called with the queue spinlock held to protect
* the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
+ bool need_wakeup = !rpc_test_and_set_running(task);
+
rpc_clear_queued(task);
- if (rpc_test_and_set_running(task))
+ if (!need_wakeup)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
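
A reduced sketch of the required ordering, with the wakeup body elided; the point is that RPC_TASK_RUNNING must be set before RPC_TASK_QUEUED is cleared:

static void rpc_make_runnable_sketch(struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);	/* RUNNING first */

	/* only then clear QUEUED, so the lockless RPC_IS_QUEUED() test in
	 * __rpc_execute() cannot observe a runnable-but-not-RUNNING task */
	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	/* ... wake the task as in the real function ... */
}
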
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index c3f9e1ef7f5..06bdf5a1082 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
goto badcred;
argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
argv->iov_len -= slen*4;
-
+ /*
+ * Note: we skip uid_valid()/gid_valid() checks here for
+ * backwards compatibility with clients that use -1 id's.
+ * Instead, -1 uid or gid is later mapped to the
+ * (export-specific) anonymous id by nfsd_setuser.
+ * Supplementary gid's will be left alone.
+ */
cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
- if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
- goto badcred;
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
@@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
return SVC_CLOSE;
for (i = 0; i < slen; i++) {
kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
- if (!gid_valid(kgid))
- goto badcred;
GROUP_AT(cred->cr_group_info, i) = kgid;
}
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7f93e2a42d7..2e330e8109b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -165,7 +165,7 @@ static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);
-static __init void vsock_init_tables(void)
+static void vsock_init_tables(void)
{
int i;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ddf74f0ae1..ed56e2bc7ba 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -638,17 +638,21 @@ int wiphy_register(struct wiphy *wiphy)
* cfg80211_mutex lock
*/
res = rfkill_register(rdev->rfkill);
- if (res)
- goto out_rm_dev;
+ if (res) {
+ device_del(&rdev->wiphy.dev);
+
+ mutex_lock(&cfg80211_mutex);
+ debugfs_remove_recursive(rdev->wiphy.debugfsdir);
+ list_del_rcu(&rdev->list);
+ wiphy_regulatory_deregister(wiphy);
+ mutex_unlock(&cfg80211_mutex);
+ return res;
+ }
rtnl_lock();
rdev->wiphy.registered = true;
rtnl_unlock();
return 0;
-
-out_rm_dev:
- device_del(&rdev->wiphy.dev);
- return res;
}
EXPORT_SYMBOL(wiphy_register);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 58e13a8c95f..34ef5227d61 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7177,6 +7177,8 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
&tcp->payload_tok))
return -ENOBUFS;
+ nla_nest_end(msg, nl_tcp);
+
return 0;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 98532c00242..6dee0ad949c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -855,7 +855,7 @@ static void handle_channel(struct wiphy *wiphy,
return;
REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
- chan->flags = IEEE80211_CHAN_DISABLED;
+ chan->flags |= IEEE80211_CHAN_DISABLED;
return;
}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 05acdc51c38..2251b4bafd1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -227,6 +227,9 @@ void cfg80211_conn_work(struct work_struct *work)
mutex_lock(&rdev->sched_scan_mtx);
list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
+
wdev_lock(wdev);
if (!netif_running(wdev->netdev)) {
wdev_unlock(wdev);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 7586de77a2f..3cdf17c9ed8 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2386,6 +2386,7 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
TP_STRUCT__entry(
WIPHY_ENTRY
WDEV_ENTRY
+ __field(bool, non_wireless)
__field(bool, disconnect)
__field(bool, magic_pkt)
__field(bool, gtk_rekey_failure)
@@ -2394,20 +2395,22 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
__field(bool, rfkill_release)
__field(s32, pattern_idx)
__field(u32, packet_len)
- __dynamic_array(u8, packet, wakeup->packet_present_len)
+ __dynamic_array(u8, packet,
+ wakeup ? wakeup->packet_present_len : 0)
),
TP_fast_assign(
WIPHY_ASSIGN;
WDEV_ASSIGN;
- __entry->disconnect = wakeup->disconnect;
- __entry->magic_pkt = wakeup->magic_pkt;
- __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure;
- __entry->eap_identity_req = wakeup->eap_identity_req;
- __entry->four_way_handshake = wakeup->four_way_handshake;
- __entry->rfkill_release = wakeup->rfkill_release;
- __entry->pattern_idx = wakeup->pattern_idx;
- __entry->packet_len = wakeup->packet_len;
- if (wakeup->packet && wakeup->packet_present_len)
+ __entry->non_wireless = !wakeup;
+ __entry->disconnect = wakeup ? wakeup->disconnect : false;
+ __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false;
+ __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false;
+ __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false;
+ __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false;
+ __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false;
+ __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false;
+ __entry->packet_len = wakeup ? wakeup->packet_len : false;
+ if (wakeup && wakeup->packet && wakeup->packet_present_len)
memcpy(__get_dynamic_array(packet), wakeup->packet,
wakeup->packet_present_len);
),
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
index 0ae730be5f4..b87206cc92f 100644
--- a/scripts/kconfig/list.h
+++ b/scripts/kconfig/list.h
@@ -51,6 +51,19 @@ struct list_head {
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
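
A small usage sketch of the new macro, mirroring the mconf.c hunk below: entries can be freed while iterating because the successor is saved in the temporary cursor before the current entry is released (struct fields are illustrative):

struct jump_key {
	struct list_head entries;
	/* ... */
};

LIST_HEAD(head);
struct jump_key *pos, *tmp;

list_for_each_entry_safe(pos, tmp, &head, entries)
	free(pos);	/* safe: tmp already points at the next node */
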
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 566288a7637..c5418d622a0 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -389,6 +389,7 @@ again:
.targets = targets,
.keys = keys,
};
+ struct jump_key *pos, *tmp;
res = get_relations_str(sym_arr, &head);
dres = show_textbox_ext(_("Search Results"), (char *)
@@ -402,6 +403,8 @@ again:
again = true;
}
str_free(&res);
+ list_for_each_entry_safe(pos, tmp, &head, entries)
+ free(pos);
} while (again);
free(sym_arr);
str_free(&title);
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 33689396953..68b85e1fe8f 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -156,7 +156,6 @@ sub read_kconfig {
my $state = "NONE";
my $config;
- my @kconfigs;
my $cont = 0;
my $line;
@@ -190,7 +189,13 @@ sub read_kconfig {
# collect any Kconfig sources
if (/^source\s*"(.*)"/) {
- $kconfigs[$#kconfigs+1] = $1;
+ my $kconfig = $1;
+ # prevent reading twice.
+ if (!defined($read_kconfigs{$kconfig})) {
+ $read_kconfigs{$kconfig} = 1;
+ read_kconfig($kconfig);
+ }
+ next;
}
# configs found
@@ -250,14 +255,6 @@ sub read_kconfig {
}
}
close($kinfile);
-
- # read in any configs that were found.
- foreach my $kconfig (@kconfigs) {
- if (!defined($read_kconfigs{$kconfig})) {
- $read_kconfigs{$kconfig} = 1;
- read_kconfig($kconfig);
- }
- }
}
if ($kconfig) {
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index acb86507828..ed79ca3da88 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -12,6 +12,43 @@
set -e
+# Attempt to find the correct Debian architecture
+forcearch=""
+debarch=""
+case "$UTS_MACHINE" in
+i386|ia64|alpha)
+ debarch="$UTS_MACHINE" ;;
+x86_64)
+ debarch=amd64 ;;
+sparc*)
+ debarch=sparc ;;
+s390*)
+ debarch=s390 ;;
+ppc*)
+ debarch=powerpc ;;
+parisc*)
+ debarch=hppa ;;
+mips*)
+ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
+arm*)
+ debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
+*)
+ echo "" >&2
+ echo "** ** ** WARNING ** ** **" >&2
+ echo "" >&2
+ echo "Your architecture doesn't have it's equivalent" >&2
+ echo "Debian userspace architecture defined!" >&2
+ echo "Falling back to using your current userspace instead!" >&2
+ echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
+ echo "" >&2
+esac
+if [ -n "$KBUILD_DEBARCH" ] ; then
+ debarch="$KBUILD_DEBARCH"
+fi
+if [ -n "$debarch" ] ; then
+ forcearch="-DArchitecture=$debarch"
+fi
+
create_package() {
local pname="$1" pdir="$2"
@@ -25,42 +62,6 @@ create_package() {
chown -R root:root "$pdir"
chmod -R go-w "$pdir"
- # Attempt to find the correct Debian architecture
- local forcearch="" debarch=""
- case "$UTS_MACHINE" in
- i386|ia64|alpha)
- debarch="$UTS_MACHINE" ;;
- x86_64)
- debarch=amd64 ;;
- sparc*)
- debarch=sparc ;;
- s390*)
- debarch=s390 ;;
- ppc*)
- debarch=powerpc ;;
- parisc*)
- debarch=hppa ;;
- mips*)
- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
- arm*)
- debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
- *)
- echo "" >&2
- echo "** ** ** WARNING ** ** **" >&2
- echo "" >&2
- echo "Your architecture doesn't have it's equivalent" >&2
- echo "Debian userspace architecture defined!" >&2
- echo "Falling back to using your current userspace instead!" >&2
- echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
- echo "" >&2
- esac
- if [ -n "$KBUILD_DEBARCH" ] ; then
- debarch="$KBUILD_DEBARCH"
- fi
- if [ -n "$debarch" ] ; then
- forcearch="-DArchitecture=$debarch"
- fi
-
# Create the package
dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
dpkg --build "$pdir" ..
@@ -252,15 +253,14 @@ mkdir -p "$destdir"
(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
cat <<EOF >> debian/control
Package: $kernel_headers_packagename
Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: $debarch
+Description: Linux kernel headers for $KERNELRELEASE on $debarch
+ This package provides kernel header files for $KERNELRELEASE on $debarch
.
This is useful for people who need to build external modules
EOF
@@ -281,6 +281,12 @@ EOF
create_package "$fwpackagename" "$fwdir"
fi
+# Copy device tree files if generated
+stat arch/$ARCH/boot/dts/*.dtb && {
+ mkdir -p "$tmpdir/lib/firmware/$version/device-tree"
+ cp arch/$ARCH/boot/dts/*.dtb "$tmpdir/lib/firmware/$version/device-tree"
+}
+
cat <<EOF >> debian/control
Package: $libc_headers_packagename
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index e6b01669324..bdd888ec9a8 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -657,14 +657,14 @@ static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
return 0;
}
-static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu)
+static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
+ const struct firmware *fw_entry)
{
int n, i;
int reg;
int value;
unsigned int write_post;
unsigned long flags;
- const struct firmware *fw_entry = emu->firmware;
if (!fw_entry)
return -EIO;
@@ -725,9 +725,34 @@ static int emu1010_firmware_thread(void *data)
/* Return to Audio Dock programming mode */
snd_printk(KERN_INFO "emu1010: Loading Audio Dock Firmware\n");
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, EMU_HANA_FPGA_CONFIG_AUDIODOCK);
- err = snd_emu1010_load_firmware(emu);
- if (err != 0)
- continue;
+
+ if (!emu->dock_fw) {
+ const char *filename = NULL;
+ switch (emu->card_capabilities->emu_model) {
+ case EMU_MODEL_EMU1010:
+ filename = DOCK_FILENAME;
+ break;
+ case EMU_MODEL_EMU1010B:
+ filename = MICRO_DOCK_FILENAME;
+ break;
+ case EMU_MODEL_EMU1616:
+ filename = MICRO_DOCK_FILENAME;
+ break;
+ }
+ if (filename) {
+ err = request_firmware(&emu->dock_fw,
+ filename,
+ &emu->pci->dev);
+ if (err)
+ continue;
+ }
+ }
+
+ if (emu->dock_fw) {
+ err = snd_emu1010_load_firmware(emu, emu->dock_fw);
+ if (err)
+ continue;
+ }
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
@@ -862,7 +887,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
filename, emu->firmware->size);
}
- err = snd_emu1010_load_firmware(emu);
+ err = snd_emu1010_load_firmware(emu, emu->firmware);
if (err != 0) {
snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
return err;
@@ -1253,6 +1278,8 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
kthread_stop(emu->emu1010.firmware_thread);
if (emu->firmware)
release_firmware(emu->firmware);
+ if (emu->dock_fw)
+ release_firmware(emu->dock_fw);
if (emu->irq >= 0)
free_irq(emu->irq, emu);
/* remove reserved page */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 4aba7646dd9..c414cdd9cfc 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
struct hda_bus_unsolicited *unsol;
unsigned int wp;
+ if (!bus || !bus->workq)
+ return 0;
+
trace_hda_unsol_event(bus, res, res_ex);
unsol = bus->unsol;
if (!unsol)
@@ -1577,7 +1580,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
"NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
nid, stream_tag, channel_id, format);
p = get_hda_cvt_setup(codec, nid);
- if (!p || p->active)
+ if (!p)
return;
if (codec->pcm_format_first)
@@ -1624,7 +1627,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
p = get_hda_cvt_setup(codec, nid);
- if (p && p->active) {
+ if (p) {
/* here we just clear the active flag when do_now isn't set;
* actual clean-ups will be done later in
 * purify_inactive_streams() called from snd_hda_codec_prepare()
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2dbe767be16..222b6dee027 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -772,6 +772,8 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
return;
if (codec->inv_eapd)
enable = !enable;
+ if (spec->keep_eapd_on && !enable)
+ return;
snd_hda_codec_update_cache(codec, pin, 0,
AC_VERB_SET_EAPD_BTLENABLE,
enable ? 0x02 : 0x00);
@@ -2072,6 +2074,14 @@ get_multiio_path(struct hda_codec *codec, int idx)
static void update_automute_all(struct hda_codec *codec);
+/* Default value to be passed as aamix argument for snd_hda_activate_path();
+ * used for output paths
+ */
+static bool aamix_default(struct hda_gen_spec *spec)
+{
+ return !spec->have_aamix_ctl || spec->aamix_mode;
+}
+
static int set_multi_io(struct hda_codec *codec, int idx, bool output)
{
struct hda_gen_spec *spec = codec->spec;
@@ -2087,11 +2097,11 @@ static int set_multi_io(struct hda_codec *codec, int idx, bool output)
if (output) {
set_pin_target(codec, nid, PIN_OUT, true);
- snd_hda_activate_path(codec, path, true, true);
+ snd_hda_activate_path(codec, path, true, aamix_default(spec));
set_pin_eapd(codec, nid, true);
} else {
set_pin_eapd(codec, nid, false);
- snd_hda_activate_path(codec, path, false, true);
+ snd_hda_activate_path(codec, path, false, aamix_default(spec));
set_pin_target(codec, nid, spec->multi_io[idx].ctl_in, true);
path_power_down_sync(codec, path);
}
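
How the new aamix_default() helper resolves, shown as a truth table (illustrative):

/*
 *  have_aamix_ctl  aamix_mode | result
 *        0             -      | true   (no user control; keep the mix path up)
 *        1             0      | false  (user selected the direct path)
 *        1             1      | true   (user selected the mix path)
 */
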
@@ -2182,8 +2192,8 @@ static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
snd_hda_activate_path(codec, mix_path, true, true);
path_power_down_sync(codec, nomix_path);
} else {
- snd_hda_activate_path(codec, mix_path, false, true);
- snd_hda_activate_path(codec, nomix_path, true, true);
+ snd_hda_activate_path(codec, mix_path, false, false);
+ snd_hda_activate_path(codec, nomix_path, true, false);
path_power_down_sync(codec, mix_path);
}
}
@@ -3663,6 +3673,36 @@ static void update_automute_all(struct hda_codec *codec)
snd_hda_gen_mic_autoswitch(codec, NULL);
}
+/* call appropriate hooks */
+static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ if (spec->hp_automute_hook)
+ spec->hp_automute_hook(codec, jack);
+ else
+ snd_hda_gen_hp_automute(codec, jack);
+}
+
+static void call_line_automute(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ if (spec->line_automute_hook)
+ spec->line_automute_hook(codec, jack);
+ else
+ snd_hda_gen_line_automute(codec, jack);
+}
+
+static void call_mic_autoswitch(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ if (spec->mic_autoswitch_hook)
+ spec->mic_autoswitch_hook(codec, jack);
+ else
+ snd_hda_gen_mic_autoswitch(codec, jack);
+}
+
/*
* Auto-Mute mode mixer enum support
*/
@@ -3797,9 +3837,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
snd_printdd("hda-codec: Enable HP auto-muting on NID 0x%x\n",
nid);
snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT,
- spec->hp_automute_hook ?
- spec->hp_automute_hook :
- snd_hda_gen_hp_automute);
+ call_hp_automute);
spec->detect_hp = 1;
}
@@ -3812,9 +3850,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
snd_printdd("hda-codec: Enable Line-Out auto-muting on NID 0x%x\n", nid);
snd_hda_jack_detect_enable_callback(codec, nid,
HDA_GEN_FRONT_EVENT,
- spec->line_automute_hook ?
- spec->line_automute_hook :
- snd_hda_gen_line_automute);
+ call_line_automute);
spec->detect_lo = 1;
}
spec->automute_lo_possible = spec->detect_hp;
@@ -3856,9 +3892,7 @@ static bool auto_mic_check_imux(struct hda_codec *codec)
snd_hda_jack_detect_enable_callback(codec,
spec->am_entry[i].pin,
HDA_GEN_MIC_EVENT,
- spec->mic_autoswitch_hook ?
- spec->mic_autoswitch_hook :
- snd_hda_gen_mic_autoswitch);
+ call_mic_autoswitch);
return true;
}
@@ -4729,7 +4763,8 @@ static void set_output_and_unmute(struct hda_codec *codec, int path_idx)
return;
pin = path->path[path->depth - 1];
restore_pin_ctl(codec, pin);
- snd_hda_activate_path(codec, path, path->active, true);
+ snd_hda_activate_path(codec, path, path->active,
+ aamix_default(codec->spec));
set_pin_eapd(codec, pin, path->active);
}
@@ -4779,7 +4814,8 @@ static void init_multi_io(struct hda_codec *codec)
if (!spec->multi_io[i].ctl_in)
spec->multi_io[i].ctl_in =
snd_hda_codec_get_pin_target(codec, pin);
- snd_hda_activate_path(codec, path, path->active, true);
+ snd_hda_activate_path(codec, path, path->active,
+ aamix_default(spec));
}
}
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 009b57be96d..aee3238f5a9 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -205,6 +205,7 @@ struct hda_gen_spec {
unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
unsigned int own_eapd_ctl:1; /* set EAPD by own function */
+ unsigned int keep_eapd_on:1; /* don't turn off EAPD automatically */
unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
unsigned int indep_hp:1; /* independent HP supported */
unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 2a89d1eefeb..1e5a30fef3d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -64,6 +64,7 @@ struct conexant_spec {
/* extra EAPD pins */
unsigned int num_eapds;
hda_nid_t eapds[4];
+ bool dynamic_eapd;
#ifdef ENABLE_CXT_STATIC_QUIRKS
const struct snd_kcontrol_new *mixers[5];
@@ -3152,7 +3153,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
* thus it might control over all pins.
*/
if (spec->num_eapds > 2)
- spec->gen.own_eapd_ctl = 1;
+ spec->dynamic_eapd = 1;
}
static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3191,6 +3192,15 @@ static int cx_auto_build_controls(struct hda_codec *codec)
return 0;
}
+static int cx_auto_init(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ snd_hda_gen_init(codec);
+ if (!spec->dynamic_eapd)
+ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+ return 0;
+}
+
static void cx_auto_free(struct hda_codec *codec)
{
snd_hda_detach_beep_device(codec);
@@ -3200,7 +3210,7 @@ static void cx_auto_free(struct hda_codec *codec)
static const struct hda_codec_ops cx_auto_patch_ops = {
.build_controls = cx_auto_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
- .init = snd_hda_gen_init,
+ .init = cx_auto_init,
.free = cx_auto_free,
.unsol_event = snd_hda_jack_unsol_event,
#ifdef CONFIG_PM
@@ -3350,7 +3360,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
cx_auto_parse_beep(codec);
cx_auto_parse_eapd(codec);
- if (spec->gen.own_eapd_ctl)
+ spec->gen.own_eapd_ctl = 1;
+ if (spec->dynamic_eapd)
spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
switch (codec->vendor_id) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f15c36bde54..fd1970cf0b2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2515,6 +2515,7 @@ enum {
ALC269_TYPE_ALC280,
ALC269_TYPE_ALC282,
ALC269_TYPE_ALC284,
+ ALC269_TYPE_ALC286,
};
/*
@@ -2538,6 +2539,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC269VB:
case ALC269_TYPE_ALC269VD:
case ALC269_TYPE_ALC282:
+ case ALC269_TYPE_ALC286:
ssids = alc269_ssids;
break;
default:
@@ -3172,6 +3174,9 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0292:
spec->codec_variant = ALC269_TYPE_ALC284;
break;
+ case 0x10ec0286:
+ spec->codec_variant = ALC269_TYPE_ALC286;
+ break;
}
/* automatic parse from the BIOS config */
@@ -3878,6 +3883,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
{ .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
+ { .id = 0x10ec0286, .name = "ALC286", .patch = patch_alc269 },
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
{ .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index c35338a8771..9a9a0320287 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -136,6 +136,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
spec->codec_type = VT1708S;
spec->no_pin_power_ctl = 1;
spec->gen.indep_hp = 1;
+ spec->gen.keep_eapd_on = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
return spec;
}
@@ -231,9 +232,14 @@ static void vt1708_update_hp_work(struct hda_codec *codec)
static void set_widgets_power_state(struct hda_codec *codec)
{
+#if 0 /* FIXME: the assumed connections don't always match the actual
+ * routes set up by the generic parser, so better to disable
+ * the control for safety.
+ */
struct via_spec *spec = codec->spec;
if (spec->set_widgets_power_state)
spec->set_widgets_power_state(codec);
+#endif
}
static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
@@ -478,7 +484,9 @@ static int via_suspend(struct hda_codec *codec)
/* Fix pop noise on headphones */
int i;
for (i = 0; i < spec->gen.autocfg.hp_outs; i++)
- snd_hda_set_pin_ctl(codec, spec->gen.autocfg.hp_pins[i], 0);
+ snd_hda_codec_write(codec, spec->gen.autocfg.hp_pins[i],
+ 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ 0x00);
}
return 0;
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 0f6f481cec0..c92a05651d0 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = {
{ CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */
{ CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */
{ CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */
- { CS42L52_MASTERA_VOL, 0x88 }, /* r20 Master A Volume */
+ { CS42L52_MASTERA_VOL, 0x00 }, /* r20 Master A Volume */
{ CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */
{ CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */
{ CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41230ad1c3e..4a6f1daf911 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
DA7213_DMIC_DATA_SEL_SHIFT);
break;
}
- switch (pdata->dmic_data_sel) {
+ switch (pdata->dmic_samplephase) {
case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
- dmic_cfg |= (pdata->dmic_data_sel <<
+ dmic_cfg |= (pdata->dmic_samplephase <<
DA7213_DMIC_SAMPLEPHASE_SHIFT);
break;
}
- switch (pdata->dmic_data_sel) {
+ switch (pdata->dmic_clk_rate) {
case DA7213_DMIC_CLK_3_0MHZ:
case DA7213_DMIC_CLK_1_5MHZ:
- dmic_cfg |= (pdata->dmic_data_sel <<
+ dmic_cfg |= (pdata->dmic_clk_rate <<
DA7213_DMIC_CLK_RATE_SHIFT);
break;
}
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index a4c16fd70f7..5d3631921ed 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -2006,7 +2006,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
ret);
goto err_access;
}
- dev_info(codec->dev, "revision %c\n", ret + 'A');
+ dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
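A quick check of the corrected revision formula, assuming the revision register reports 0x40 for revision A, 0x41 for revision B, and so on:

	/* ret = 0x40 (rev A):
	 *   old: ret + 'A'        = 0x40 + 0x41 = 0x81  (unprintable)
	 *   new: ret - 0x40 + 'A' = 0x00 + 0x41 = 'A'
	 */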
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index cdeb301da1f..eaeab83619e 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index c9bd445c497..e5f96c98c5a 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2841,6 +2841,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
default:
return 0;
}
+ break;
default:
return 0;
}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 9321e5c9d8c..4eee59d9bad 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -626,7 +626,8 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
int word_length)
{
u32 fmt;
- u32 rotate = (word_length / 4) & 0x7;
+ u32 tx_rotate = (word_length / 4) & 0x7;
+ u32 rx_rotate = (32 - word_length) / 4;
u32 mask = (1ULL << word_length) - 1;
/*
@@ -647,9 +648,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
RXSSZ(fmt), RXSSZ(0x0F));
mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
TXSSZ(fmt), TXSSZ(0x0F));
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate),
TXROT(7));
- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate),
RXROT(7));
mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
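The split matters presumably because transmitted data is right-aligned while received data arrives left-aligned in the 32-bit slot, so the two directions need different rotations. Worked through for two common word lengths (32-bit slots assumed):

	/* word_length = 16:
	 *   tx_rotate = (16 / 4) & 7  = 4
	 *   rx_rotate = (32 - 16) / 4 = 4   (same as before)
	 *
	 * word_length = 24:
	 *   tx_rotate = (24 / 4) & 7  = 6
	 *   rx_rotate = (32 - 24) / 4 = 2   (the old shared 'rotate' was 6)
	 */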
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index e2ca12fe92e..40dd50a80f5 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -575,7 +575,6 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
urb->instance.pipe = in ? usb_rcvisocpipe(chip->dev, ep)
: usb_sndisocpipe(chip->dev, ep);
urb->instance.interval = 1;
- urb->instance.transfer_flags = URB_ISO_ASAP;
urb->instance.complete = handler;
urb->instance.context = urb;
urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
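This and the similar hunks below all drop URB_ISO_ASAP from isochronous audio URBs. Since the USB core's 3.8 scheduling rework, a URB submitted without the flag on an idle pipe still starts right away, and subsequent URBs are placed in the slots immediately following the previous one, which is what a continuous audio stream wants. A minimal iso-URB setup without the flag, using hypothetical names (there is no usb_fill_*_urb() helper for isochronous transfers, so the fields are filled by hand):

	struct urb *urb = usb_alloc_urb(N_PACKETS, GFP_KERNEL);

	urb->dev = dev;				/* target USB device */
	urb->pipe = usb_rcvisocpipe(dev, ep);	/* capture endpoint */
	urb->interval = 1;
	urb->number_of_packets = N_PACKETS;	/* per-URB packet count */
	urb->transfer_flags = 0;		/* no URB_ISO_ASAP */
	urb->complete = complete_cb;		/* completion handler */
	urb->context = ctx;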
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index fde9a7a29cb..b45e29b8c67 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -670,7 +670,6 @@ static void read_completed(struct urb *urb)
if (send_it) {
out->number_of_packets = outframe;
- out->transfer_flags = URB_ISO_ASAP;
usb_submit_urb(out, GFP_ATOMIC);
} else {
struct snd_usb_caiaq_cb_info *oinfo = out->context;
@@ -686,7 +685,6 @@ requeue:
}
urb->number_of_packets = FRAMES_PER_URB;
- urb->transfer_flags = URB_ISO_ASAP;
usb_submit_urb(urb, GFP_ATOMIC);
}
@@ -751,7 +749,6 @@ static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret)
* BYTES_PER_FRAME;
urbs[i]->context = &dev->data_cb_info[i];
urbs[i]->interval = 1;
- urbs[i]->transfer_flags = URB_ISO_ASAP;
urbs[i]->number_of_packets = FRAMES_PER_URB;
urbs[i]->complete = (dir == SNDRV_PCM_STREAM_CAPTURE) ?
read_completed : write_completed;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 2da8ad75fd9..b79b7dc4920 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -627,7 +627,9 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (!chip->shutdown && !chip->probing)
+ if (chip->probing)
+ err = 0;
+ else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
up_read(&chip->shutdown_rwsem);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 8a751b4887e..d32ea411545 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -116,6 +116,7 @@ struct snd_usb_substream {
unsigned int altset_idx; /* USB data format: index of alternate setting */
unsigned int txfr_quirk:1; /* allow sub-frame alignment */
unsigned int fmt_type; /* USB audio format type (1-3) */
+ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
unsigned int running: 1; /* running status */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 21049b882ee..63cca3a219c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -677,7 +677,7 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
if (!u->urb->transfer_buffer)
goto out_of_memory;
u->urb->pipe = ep->pipe;
- u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
u->urb->interval = 1 << ep->datainterval;
u->urb->context = u;
u->urb->complete = snd_complete_urb;
@@ -716,8 +716,7 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep,
u->urb->transfer_dma = ep->sync_dma + i * 4;
u->urb->transfer_buffer_length = 4;
u->urb->pipe = ep->pipe;
- u->urb->transfer_flags = URB_ISO_ASAP |
- URB_NO_TRANSFER_DMA_MAP;
+ u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
u->urb->number_of_packets = 1;
u->urb->interval = 1 << ep->syncinterval;
u->urb->context = u;
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 34b9bb7fe87..e5fee1800a4 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -126,7 +126,6 @@ struct snd_usb_midi {
struct snd_usb_midi_in_endpoint *in;
} endpoints[MIDI_MAX_ENDPOINTS];
unsigned long input_triggered;
- bool autopm_reference;
unsigned int opened[2];
unsigned char disconnected;
unsigned char input_running;
@@ -1040,7 +1039,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
{
struct snd_usb_midi* umidi = substream->rmidi->private_data;
struct snd_kcontrol *ctl;
- int err;
down_read(&umidi->disc_rwsem);
if (umidi->disconnected) {
@@ -1051,13 +1049,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
mutex_lock(&umidi->mutex);
if (open) {
if (!umidi->opened[0] && !umidi->opened[1]) {
- err = usb_autopm_get_interface(umidi->iface);
- umidi->autopm_reference = err >= 0;
- if (err < 0 && err != -EACCES) {
- mutex_unlock(&umidi->mutex);
- up_read(&umidi->disc_rwsem);
- return -EIO;
- }
if (umidi->roland_load_ctl) {
ctl = umidi->roland_load_ctl;
ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
@@ -1080,8 +1071,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
snd_ctl_notify(umidi->card,
SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
}
- if (umidi->autopm_reference)
- usb_autopm_put_interface(umidi->iface);
}
}
mutex_unlock(&umidi->mutex);
@@ -2256,6 +2245,8 @@ int snd_usbmidi_create(struct snd_card *card,
return err;
}
+ usb_autopm_get_interface_no_resume(umidi->iface);
+
list_add_tail(&umidi->list, midi_list);
return 0;
}
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 8b81cb54026..6ad617b9473 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -1120,8 +1120,7 @@ static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream,
usb_init_urb(&urb->urb);
urb->urb.dev = ua->dev;
urb->urb.pipe = stream->usb_pipe;
- urb->urb.transfer_flags = URB_ISO_ASAP |
- URB_NO_TRANSFER_DMA_MAP;
+ urb->urb.transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->urb.transfer_buffer = addr;
urb->urb.transfer_dma = dma;
urb->urb.transfer_buffer_length = max_packet_size;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ca4739c3f65..e5c7f9f20fd 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
case USB_ID(0x046d, 0x0991):
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
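For reference, the quirk these IDs opt into is a resolution override further down in volume_control_quirks(); roughly (a sketch, with the control-name check assumed from the surrounding code):

	if (!strcmp(kctl->id.name, "Mic Capture Volume"))
		cval->res = 384;	/* replace the bogus device-reported step */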
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index f94397b42aa..a481fea39e8 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1170,7 +1170,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
- cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj;
if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
snd_printdd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
// continue;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c39f898b15d..1c719d86013 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -215,7 +215,13 @@
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
},
{
- USB_DEVICE(0x046d, 0x0990),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x046d,
+ .idProduct = 0x0990,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
.vendor_name = "Logitech, Inc.",
.product_name = "QuickCam Pro 9000",
@@ -1714,7 +1720,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
.ifnum = 0,
- .type = QUIRK_MIDI_STANDARD_INTERFACE
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
}
},
{
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9c5ab22358b..0c7eb496a8e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -837,6 +837,7 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
break;
}
snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
+ subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
}
void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
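Taken together with the retire_capture_urb() change above, the effect at 176.4 kHz and 192 kHz is (illustrative summary):

	/* set_format_emu_quirk():  subs->pkt_offset_adj = 4;
	 * retire_capture_urb():    cp = buffer + iso offset + pkt_offset_adj;
	 *
	 * i.e. the first 4 bytes of every isochronous packet (extra data these
	 * devices prepend at high rates) are skipped before samples are copied
	 * into the PCM buffer.
	 */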
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index ad181d538bd..cfc4d4eaf42 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
subs->dev = as->chip->dev;
subs->txfr_quirk = as->chip->txfr_quirk;
subs->speed = snd_usb_get_speed(subs->dev);
+ subs->pkt_offset_adj = 0;
snd_usb_set_pcm_ops(as->pcm, stream);
@@ -396,6 +397,14 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
if (!csep && altsd->bNumEndpoints >= 2)
csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
+ /*
+ * If we can't locate the USB_DT_CS_ENDPOINT descriptor in the extra
+ * bytes after the first endpoint, go search the entire interface.
+ * Some devices have it directly *before* the standard endpoint.
+ */
+ if (!csep)
+ csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
+
if (!csep || csep->bLength < 7 ||
csep->bDescriptorSubtype != UAC_EP_GENERAL) {
snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index 1e7a47a8660..bf618e1500a 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -69,7 +69,6 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
++u, transfer += transfer_length) {
struct urb *urb = urbs[u];
struct usb_iso_packet_descriptor *desc;
- urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = transfer;
urb->dev = dev;
urb->pipe = pipe;
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 520ef96d7c7..b37653247ef 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -503,7 +503,6 @@ static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs)
if (0 == i)
atomic_set(&subs->state, state_STARTING3);
urb->dev = usX2Y->dev;
- urb->transfer_flags = URB_ISO_ASAP;
for (pack = 0; pack < nr_of_packs(); pack++) {
urb->iso_frame_desc[pack].offset = subs->maxpacksize * pack;
urb->iso_frame_desc[pack].length = subs->maxpacksize;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index cc56007791e..f2a1acdc4d8 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -443,7 +443,6 @@ static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs)
if (0 == u)
atomic_set(&subs->state, state_STARTING3);
urb->dev = usX2Y->dev;
- urb->transfer_flags = URB_ISO_ASAP;
for (pack = 0; pack < nr_of_packs(); pack++) {
urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs());
urb->iso_frame_desc[pack].length = subs->maxpacksize;
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 8ef3bd30a54..3e897198d1f 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -173,7 +173,7 @@ _ge-abspath = $(if $(is-executable),$(1))
# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
#
define get-executable-or-default
-$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
+$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
_gea_warn = $(warning The path '$(1)' is not executable.)
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a4ffc950002..4c116056091 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -40,9 +40,9 @@ def get_kallsyms_table():
def get_sym(sloc):
loc = int(sloc)
- for i in kallsyms:
- if (i['loc'] >= loc):
- return (i['name'], i['loc']-loc)
+ for i in kallsyms[::-1]:
+ if loc >= i['loc']:
+ return (i['name'], loc - i['loc'])
return (None, 0)
def print_drop_table():
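The fixed get_sym() walks the address-sorted table from the highest address down and returns the first symbol at or below the target, i.e. the symbol that contains the address, together with the offset into it. The same idea in C, with hypothetical names (a linear scan to mirror the script; a binary search would also work on the sorted table):

	struct sym {
		unsigned long loc;	/* symbol start address */
		const char *name;
	};

	/* 'tab' is sorted by ascending address. */
	static const struct sym *find_sym(const struct sym *tab, int n,
					  unsigned long loc, unsigned long *off)
	{
		int i;

		for (i = n - 1; i >= 0; i--) {
			if (loc >= tab[i].loc) {
				*off = loc - tab[i].loc;
				return &tab[i];
			}
		}
		return NULL;
	}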
@@ -64,7 +64,7 @@ def trace_end():
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
- skbaddr, protocol, location):
+ skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1