-rw-r--r--Documentation/DMA-mapping.txt6
-rw-r--r--Documentation/DocBook/kernel-hacking.tmpl2
-rw-r--r--Documentation/filesystems/proc.txt4
-rw-r--r--Documentation/gpio.txt4
-rw-r--r--Documentation/misc-devices/c2port.txt (renamed from Documentation/c2port.txt)0
-rw-r--r--Documentation/misc-devices/ics932s401 (renamed from Documentation/ics932s401)0
-rw-r--r--MAINTAINERS3
-rw-r--r--arch/arm/configs/da830_omapl137_defconfig1254
-rw-r--r--arch/arm/configs/da8xx_omapl_defconfig (renamed from arch/arm/configs/da850_omapl138_defconfig)257
-rw-r--r--arch/arm/configs/davinci_all_defconfig126
-rw-r--r--arch/arm/mach-davinci/Kconfig59
-rw-r--r--arch/arm/mach-davinci/Makefile5
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c466
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c437
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c16
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c13
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c57
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c15
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c86
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c323
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c20
-rw-r--r--arch/arm/mach-davinci/clock.c231
-rw-r--r--arch/arm/mach-davinci/clock.h17
-rw-r--r--arch/arm/mach-davinci/common.c4
-rw-r--r--arch/arm/mach-davinci/cp_intc.c3
-rw-r--r--arch/arm/mach-davinci/cpufreq.c226
-rw-r--r--arch/arm/mach-davinci/cpuidle.c197
-rw-r--r--arch/arm/mach-davinci/da830.c75
-rw-r--r--arch/arm/mach-davinci/da850.c298
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c106
-rw-r--r--arch/arm/mach-davinci/devices.c6
-rw-r--r--arch/arm/mach-davinci/dm355.c2
-rw-r--r--arch/arm/mach-davinci/dm365.c107
-rw-r--r--arch/arm/mach-davinci/dm644x.c7
-rw-r--r--arch/arm/mach-davinci/dm646x.c11
-rw-r--r--arch/arm/mach-davinci/dma.c105
-rw-r--r--arch/arm/mach-davinci/gpio.c9
-rw-r--r--arch/arm/mach-davinci/include/mach/asp.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h6
-rw-r--r--arch/arm/mach-davinci/include/mach/cpufreq.h26
-rw-r--r--arch/arm/mach-davinci/include/mach/cpuidle.h17
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h26
-rw-r--r--arch/arm/mach-davinci/include/mach/dm365.h10
-rw-r--r--arch/arm/mach-davinci/include/mach/dm644x.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/irqs.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/mux.h20
-rw-r--r--arch/arm/mach-davinci/include/mach/system.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/usb.h59
-rw-r--r--arch/arm/mach-davinci/mux.c1
-rw-r--r--arch/arm/mach-davinci/psc.c3
-rw-r--r--arch/arm/mach-davinci/serial.c6
-rw-r--r--arch/arm/mach-davinci/sram.c3
-rw-r--r--arch/arm/mach-davinci/time.c6
-rw-r--r--arch/arm/mach-davinci/usb.c84
-rw-r--r--arch/blackfin/kernel/process.c6
-rw-r--r--arch/frv/kernel/process.c5
-rw-r--r--arch/h8300/kernel/process.c5
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/m68knommu/kernel/process.c5
-rw-r--r--arch/mips/include/asm/time.h14
-rw-r--r--arch/mips/kernel/time.c33
-rw-r--r--arch/mn10300/kernel/process.c12
-rw-r--r--arch/powerpc/kernel/time.c7
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/defconfig1
-rw-r--r--arch/s390/include/asm/atomic.h8
-rw-r--r--arch/s390/include/asm/ccwdev.h4
-rw-r--r--arch/s390/include/asm/cputime.h8
-rw-r--r--arch/s390/include/asm/mmu_context.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h3
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/include/asm/setup.h17
-rw-r--r--arch/s390/include/asm/smp.h54
-rw-r--r--arch/s390/include/asm/sockios.h21
-rw-r--r--arch/s390/include/asm/termbits.h206
-rw-r--r--arch/s390/include/asm/todclk.h23
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/kernel/Makefile1
-rw-r--r--arch/s390/kernel/compat_linux.c6
-rw-r--r--arch/s390/kernel/compat_linux.h4
-rw-r--r--arch/s390/kernel/head64.S3
-rw-r--r--arch/s390/kernel/s390_ext.c2
-rw-r--r--arch/s390/kernel/setup.c36
-rw-r--r--arch/s390/kernel/time.c7
-rw-r--r--arch/s390/kernel/vdso.c9
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/lib/uaccess_mvcos.c4
-rw-r--r--arch/s390/lib/uaccess_pt.c147
-rw-r--r--arch/s390/mm/cmm.c61
-rw-r--r--arch/s390/mm/fault.c378
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/mm/vmem.c11
-rw-r--r--arch/sh/Kconfig29
-rw-r--r--arch/sh/Makefile4
-rw-r--r--arch/sh/boards/Makefile1
-rw-r--r--arch/sh/boards/mach-ap325rxa/Makefile2
-rw-r--r--arch/sh/boards/mach-ap325rxa/sdram.S69
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c (renamed from arch/sh/boards/board-ap325rxa.c)97
-rw-r--r--arch/sh/boards/mach-ecovec24/Makefile2
-rw-r--r--arch/sh/boards/mach-ecovec24/sdram.S52
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c197
-rw-r--r--arch/sh/boards/mach-highlander/setup.c2
-rw-r--r--arch/sh/boards/mach-kfr2r09/Makefile2
-rw-r--r--arch/sh/boards/mach-kfr2r09/sdram.S80
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c238
-rw-r--r--arch/sh/boards/mach-migor/Makefile2
-rw-r--r--arch/sh/boards/mach-migor/sdram.S69
-rw-r--r--arch/sh/boards/mach-migor/setup.c65
-rw-r--r--arch/sh/boards/mach-r2d/irq.c2
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c32
-rw-r--r--arch/sh/boards/mach-se/7722/setup.c17
-rw-r--r--arch/sh/boards/mach-se/7724/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7724/sdram.S52
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c96
-rw-r--r--arch/sh/boot/compressed/misc.c2
-rw-r--r--arch/sh/boot/romimage/Makefile12
-rw-r--r--arch/sh/boot/romimage/head.S38
-rw-r--r--arch/sh/drivers/dma/dma-sysfs.c2
-rw-r--r--arch/sh/drivers/pci/Kconfig19
-rw-r--r--arch/sh/include/asm/addrspace.h9
-rw-r--r--arch/sh/include/asm/atomic.h9
-rw-r--r--arch/sh/include/asm/bitops.h4
-rw-r--r--arch/sh/include/asm/bugs.h4
-rw-r--r--arch/sh/include/asm/dma-mapping.h233
-rw-r--r--arch/sh/include/asm/dwarf.h28
-rw-r--r--arch/sh/include/asm/fixmap.h12
-rw-r--r--arch/sh/include/asm/fpu.h26
-rw-r--r--arch/sh/include/asm/ftrace.h17
-rw-r--r--arch/sh/include/asm/gpio.h82
-rw-r--r--arch/sh/include/asm/hardirq.h13
-rw-r--r--arch/sh/include/asm/io.h16
-rw-r--r--arch/sh/include/asm/irqflags.h31
-rw-r--r--arch/sh/include/asm/irqflags_32.h99
-rw-r--r--arch/sh/include/asm/irqflags_64.h85
-rw-r--r--arch/sh/include/asm/mmu.h13
-rw-r--r--arch/sh/include/asm/pci.h30
-rw-r--r--arch/sh/include/asm/perf_event.h31
-rw-r--r--arch/sh/include/asm/pgtable.h26
-rw-r--r--arch/sh/include/asm/pgtable_32.h2
-rw-r--r--arch/sh/include/asm/processor_32.h3
-rw-r--r--arch/sh/include/asm/scatterlist.h2
-rw-r--r--arch/sh/include/asm/suspend.h65
-rw-r--r--arch/sh/include/asm/system.h4
-rw-r--r--arch/sh/include/asm/system_32.h29
-rw-r--r--arch/sh/include/asm/system_64.h26
-rw-r--r--arch/sh/include/asm/thread_info.h30
-rw-r--r--arch/sh/include/asm/topology.h8
-rw-r--r--arch/sh/include/asm/ubc.h11
-rw-r--r--arch/sh/include/asm/watchdog.h59
-rw-r--r--arch/sh/include/cpu-sh4/cpu/watchdog.h13
-rw-r--r--arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt3
-rw-r--r--arch/sh/include/mach-se/mach/se7722.h11
-rw-r--r--arch/sh/kernel/Makefile10
-rw-r--r--arch/sh/kernel/asm-offsets.c23
-rw-r--r--arch/sh/kernel/cpu/Makefile1
-rw-r--r--arch/sh/kernel/cpu/init.c28
-rw-r--r--arch/sh/kernel/cpu/sh2a/fpu.c27
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S33
-rw-r--r--arch/sh/kernel/cpu/sh4/Makefile8
-rw-r--r--arch/sh/kernel/cpu/sh4/fpu.c28
-rw-r--r--arch/sh/kernel/cpu/sh4/perf_event.c253
-rw-r--r--arch/sh/kernel/cpu/sh4a/Makefile1
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/perf_event.c269
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c264
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c45
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c37
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S2
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c42
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm.c117
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm_runtime.c17
-rw-r--r--arch/sh/kernel/cpu/shmobile/sleep.S344
-rw-r--r--arch/sh/kernel/cpu/ubc.S59
-rw-r--r--arch/sh/kernel/dma-nommu.c82
-rw-r--r--arch/sh/kernel/dwarf.c222
-rw-r--r--arch/sh/kernel/entry-common.S2
-rw-r--r--arch/sh/kernel/ftrace.c146
-rw-r--r--arch/sh/kernel/head_32.S2
-rw-r--r--arch/sh/kernel/idle.c78
-rw-r--r--arch/sh/kernel/io_generic.c4
-rw-r--r--arch/sh/kernel/irq.c14
-rw-r--r--arch/sh/kernel/irq_32.c57
-rw-r--r--arch/sh/kernel/irq_64.c51
-rw-r--r--arch/sh/kernel/machine_kexec.c6
-rw-r--r--arch/sh/kernel/machvec.c4
-rw-r--r--arch/sh/kernel/module.c9
-rw-r--r--arch/sh/kernel/perf_callchain.c98
-rw-r--r--arch/sh/kernel/perf_event.c312
-rw-r--r--arch/sh/kernel/process_32.c42
-rw-r--r--arch/sh/kernel/process_64.c2
-rw-r--r--arch/sh/kernel/return_address.c54
-rw-r--r--arch/sh/kernel/setup.c4
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c67
-rw-r--r--arch/sh/kernel/sh_ksyms_64.c10
-rw-r--r--arch/sh/kernel/signal_32.c24
-rw-r--r--arch/sh/kernel/signal_64.c13
-rw-r--r--arch/sh/kernel/smp.c4
-rw-r--r--arch/sh/kernel/topology.c26
-rw-r--r--arch/sh/kernel/traps.c8
-rw-r--r--arch/sh/kernel/traps_32.c82
-rw-r--r--arch/sh/lib/Makefile7
-rw-r--r--arch/sh/lib/memset-sh4.S107
-rw-r--r--arch/sh/math-emu/math.c6
-rw-r--r--arch/sh/mm/Kconfig19
-rw-r--r--arch/sh/mm/Makefile3
-rw-r--r--arch/sh/mm/cache-sh4.c501
-rw-r--r--arch/sh/mm/cache-sh5.c2
-rw-r--r--arch/sh/mm/cache-sh7705.c2
-rw-r--r--arch/sh/mm/cache.c18
-rw-r--r--arch/sh/mm/consistent.c28
-rw-r--r--arch/sh/mm/init.c19
-rw-r--r--arch/sh/mm/kmap.c4
-rw-r--r--arch/sh/mm/numa.c2
-rw-r--r--arch/sh/mm/pmb-fixed.c45
-rw-r--r--arch/sh/mm/pmb.c268
-rw-r--r--arch/sh/oprofile/Makefile4
-rw-r--r--arch/sh/oprofile/common.c38
-rw-r--r--arch/sh/oprofile/op_impl.h2
-rw-r--r--arch/sh/oprofile/op_model_sh7750.c255
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/um/drivers/mmapper_kern.c11
-rw-r--r--arch/um/drivers/random.c3
-rw-r--r--arch/x86/crypto/Makefile3
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S517
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_asm.S157
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c333
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/hpet.h7
-rw-r--r--arch/x86/include/asm/i387.h7
-rw-r--r--arch/x86/include/asm/inst.h150
-rw-r--r--arch/x86/kernel/acpi/boot.c1
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c49
-rw-r--r--arch/x86/kernel/hpet.c77
-rw-r--r--arch/x86/kernel/vmiclock_32.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c5
-rw-r--r--crypto/Kconfig9
-rw-r--r--crypto/ansi_cprng.c82
-rw-r--r--crypto/cryptd.c7
-rw-r--r--crypto/digest.c240
-rw-r--r--crypto/hash.c183
-rw-r--r--crypto/proc.c7
-rw-r--r--crypto/testmgr.c11
-rw-r--r--crypto/testmgr.h15
-rw-r--r--drivers/cdrom/gdrom.c10
-rw-r--r--drivers/char/agp/frontend.c28
-rw-r--r--drivers/char/cs5535_gpio.c3
-rw-r--r--drivers/char/efirtc.c12
-rw-r--r--drivers/char/generic_nvram.c9
-rw-r--r--drivers/char/hw_random/core.c109
-rw-r--r--drivers/char/hw_random/virtio-rng.c78
-rw-r--r--drivers/char/mem.c17
-rw-r--r--drivers/char/misc.c5
-rw-r--r--drivers/char/nvram.c5
-rw-r--r--drivers/char/pc8736x_gpio.c2
-rw-r--r--drivers/char/scx200_gpio.c2
-rw-r--r--drivers/char/tb0219.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/macintosh/ans-lcd.c45
-rw-r--r--drivers/mfd/Kconfig8
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c156
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/parisc/eisa_eeprom.c10
-rw-r--r--drivers/pci/intr_remapping.c89
-rw-r--r--drivers/pci/intr_remapping.h7
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/s390/block/dasd.c237
-rw-r--r--drivers/s390/block/dasd_3990_erp.c47
-rw-r--r--drivers/s390/block/dasd_alias.c77
-rw-r--r--drivers/s390/block/dasd_diag.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c170
-rw-r--r--drivers/s390/block/dasd_eckd.h4
-rw-r--r--drivers/s390/block/dasd_eer.c5
-rw-r--r--drivers/s390/block/dasd_fba.c11
-rw-r--r--drivers/s390/block/dasd_int.h13
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/con3270.c1
-rw-r--r--drivers/s390/char/fs3270.c10
-rw-r--r--drivers/s390/char/monreader.c8
-rw-r--r--drivers/s390/char/monwriter.c7
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/tape.h9
-rw-r--r--drivers/s390/char/tape_34xx.c8
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tape_block.c17
-rw-r--r--drivers/s390/char/tape_char.c54
-rw-r--r--drivers/s390/char/tape_core.c65
-rw-r--r--drivers/s390/char/tape_proc.c2
-rw-r--r--drivers/s390/char/tty3270.c20
-rw-r--r--drivers/s390/char/vmlogrdr.c8
-rw-r--r--drivers/s390/char/vmur.c3
-rw-r--r--drivers/s390/char/vmwatchdog.c29
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/ccwreq.c328
-rw-r--r--drivers/s390/cio/cio.c1
-rw-r--r--drivers/s390/cio/cio.h8
-rw-r--r--drivers/s390/cio/css.c57
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/device.c1006
-rw-r--r--drivers/s390/cio/device.h25
-rw-r--r--drivers/s390/cio/device_fsm.c411
-rw-r--r--drivers/s390/cio/device_id.c375
-rw-r--r--drivers/s390/cio/device_ops.c142
-rw-r--r--drivers/s390/cio/device_pgid.c963
-rw-r--r--drivers/s390/cio/device_status.c3
-rw-r--r--drivers/s390/cio/io_sch.h73
-rw-r--r--drivers/s390/crypto/ap_bus.c31
-rw-r--r--drivers/s390/crypto/ap_bus.h18
-rw-r--r--drivers/s390/crypto/zcrypt_api.c11
-rw-r--r--drivers/s390/crypto/zcrypt_api.h2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c75
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c38
-rw-r--r--drivers/serial/Kconfig2
-rw-r--r--drivers/serial/sh-sci.c59
-rw-r--r--drivers/serial/sh-sci.h2
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/intc.c123
-rw-r--r--drivers/sh/maple/maple.c4
-rw-r--r--drivers/sh/pfc.c (renamed from arch/sh/kernel/gpio.c)43
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c32
-rw-r--r--drivers/watchdog/iTCO_wdt.c68
-rw-r--r--drivers/watchdog/s3c2410_wdt.c89
-rw-r--r--fs/reiserfs/Makefile2
-rw-r--r--fs/reiserfs/bitmap.c4
-rw-r--r--fs/reiserfs/dir.c10
-rw-r--r--fs/reiserfs/do_balan.c17
-rw-r--r--fs/reiserfs/file.c2
-rw-r--r--fs/reiserfs/fix_node.c19
-rw-r--r--fs/reiserfs/inode.c97
-rw-r--r--fs/reiserfs/ioctl.c77
-rw-r--r--fs/reiserfs/journal.c130
-rw-r--r--fs/reiserfs/lock.c88
-rw-r--r--fs/reiserfs/namei.c20
-rw-r--r--fs/reiserfs/prints.c4
-rw-r--r--fs/reiserfs/resize.c2
-rw-r--r--fs/reiserfs/stree.c53
-rw-r--r--fs/reiserfs/super.c52
-rw-r--r--fs/reiserfs/xattr.c6
-rw-r--r--include/crypto/algapi.h1
-rw-r--r--include/crypto/cryptd.h1
-rw-r--r--include/linux/clockchips.h19
-rw-r--r--include/linux/clocksource.h18
-rw-r--r--include/linux/crypto.h27
-rw-r--r--include/linux/dmar.h10
-rw-r--r--include/linux/hpet.h2
-rw-r--r--include/linux/hrtimer.h4
-rw-r--r--include/linux/hw_random.h7
-rw-r--r--include/linux/input/sh_keysc.h (renamed from arch/sh/include/asm/sh_keysc.h)6
-rw-r--r--include/linux/mfd/sh_mobile_sdhi.h8
-rw-r--r--include/linux/reiserfs_fs.h71
-rw-r--r--include/linux/reiserfs_fs_sb.h20
-rw-r--r--include/linux/sh_intc.h7
-rw-r--r--include/linux/sh_pfc.h96
-rw-r--r--include/linux/tick.h5
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/timex.h9
-rw-r--r--init/calibrate.c24
-rw-r--r--kernel/cpu.c5
-rw-r--r--kernel/hrtimer.c3
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/itimer.c7
-rw-r--r--kernel/pm_qos_params.c20
-rw-r--r--kernel/posix-cpu-timers.c5
-rw-r--r--kernel/sys.c14
-rw-r--r--kernel/time.c1
-rw-r--r--kernel/time/clockevents.c13
-rw-r--r--kernel/time/clocksource.c97
-rw-r--r--kernel/time/tick-oneshot.c4
-rw-r--r--kernel/time/tick-sched.c141
-rw-r--r--kernel/time/timekeeping.c125
-rw-r--r--kernel/time/timer_list.c10
380 files changed, 12835 insertions, 8409 deletions
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index 01f24e94bdb..ecad88d9fe5 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -214,7 +214,7 @@ most specific mask.
Here is pseudo-code showing how this might be done:
#define PLAYBACK_ADDRESS_BITS DMA_BIT_MASK(32)
- #define RECORD_ADDRESS_BITS 0x00ffffff
+ #define RECORD_ADDRESS_BITS DMA_BIT_MASK(24)
struct my_sound_card *card;
struct pci_dev *pdev;
@@ -224,14 +224,14 @@ Here is pseudo-code showing how this might be done:
card->playback_enabled = 1;
} else {
card->playback_enabled = 0;
- printk(KERN_WARN "%s: Playback disabled due to DMA limitations.\n",
+ printk(KERN_WARNING "%s: Playback disabled due to DMA limitations.\n",
card->name);
}
if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
card->record_enabled = 1;
} else {
card->record_enabled = 0;
- printk(KERN_WARN "%s: Record disabled due to DMA limitations.\n",
+ printk(KERN_WARNING "%s: Record disabled due to DMA limitations.\n",
card->name);
}
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index 992e67e6be7..7b3f4936341 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -352,7 +352,7 @@ asmlinkage long sys_mycall(int arg)
</para>
<programlisting>
-if (signal_pending())
+if (signal_pending(current))
return -ERESTARTSYS;
</programlisting>
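
(The corrected call takes the current task explicitly. As a hedged illustration of where this check normally sits, a typical interruptible wait loop in a sleeping syscall looks roughly like the sketch below; my_event_ready() is a hypothetical condition helper, and the usual wait-queue setup/teardown is omitted for brevity:

	long ret = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_event_ready())		/* hypothetical wake-up condition */
			break;
		if (signal_pending(current)) {	/* the task-taking form fixed above */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	return ret;
)
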
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 4af0018533f..94b9f2056f4 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1089,8 +1089,8 @@ The "processes" line gives the number of processes and threads created, which
includes (but is not limited to) those created by calls to the fork() and
clone() system calls.
-The "procs_running" line gives the number of processes currently running on
-CPUs.
+The "procs_running" line gives the total number of threads that are
+running or ready to run (i.e., the total number of runnable threads).
The "procs_blocked" line gives the number of processes currently blocked,
waiting for I/O to complete.
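
(For illustration, the two lines being described appear in /proc/stat as, e.g.:

	procs_running 3
	procs_blocked 0

where both counts are instantaneous values sampled at read time; the example numbers are arbitrary.)
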
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index fa4dc077ae0..e4e7daed2ba 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -380,7 +380,7 @@ rare; use gpiochip_remove() when it is unavoidable.
Most often a gpio_chip is part of an instance-specific structure with state
not exposed by the GPIO interfaces, such as addressing, power management,
-and more. Chips such as codecs will have complex non-GPIO state,
+and more. Chips such as codecs will have complex non-GPIO state.
Any debugfs dump method should normally ignore signals which haven't been
requested as GPIOs. They can use gpiochip_is_requested(), which returns
@@ -531,7 +531,7 @@ and have the following read/write attributes:
This file exists only if the pin can be configured as an
interrupt generating input pin.
-GPIO controllers have paths like /sys/class/gpio/chipchip42/ (for the
+GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
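
(A hedged sketch of the dbg_show advice in the first gpio.txt hunk above — not an in-tree driver — showing a chip's debugfs dump skipping lines that were never requested as GPIOs; apart from the made-up function name, the fields and calls are the gpiolib API of this era:

	static void my_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
	{
		unsigned offset;

		for (offset = 0; offset < chip->ngpio; offset++) {
			/* NULL means the line was not requested as a GPIO */
			const char *label = gpiochip_is_requested(chip, offset);

			if (!label)
				continue;
			seq_printf(s, " gpio-%-3u (%-20.20s)\n",
				   chip->base + offset, label);
		}
	}
)
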
diff --git a/Documentation/c2port.txt b/Documentation/misc-devices/c2port.txt
index d9bf93ea439..d9bf93ea439 100644
--- a/Documentation/c2port.txt
+++ b/Documentation/misc-devices/c2port.txt
diff --git a/Documentation/ics932s401 b/Documentation/misc-devices/ics932s401
index 07a739f406d..07a739f406d 100644
--- a/Documentation/ics932s401
+++ b/Documentation/misc-devices/ics932s401
diff --git a/MAINTAINERS b/MAINTAINERS
index ea781c1cfb5..c8973c6102d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3139,6 +3139,7 @@ S: Supported
F: Documentation/s390/kvm.txt
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
+F: drivers/s390/kvm/
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
@@ -4553,6 +4554,7 @@ L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: arch/s390/
+F: drivers/s390/
S390 NETWORK DRIVERS
M: Ursula Braun <ursula.braun@de.ibm.com>
@@ -4568,6 +4570,7 @@ M: Felix Beck <felix.beck@de.ibm.com>
M: Ralph Wuerthner <ralph.wuerthner@de.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/crypto/
diff --git a/arch/arm/configs/da830_omapl137_defconfig b/arch/arm/configs/da830_omapl137_defconfig
deleted file mode 100644
index 7c8e38f5c5a..00000000000
--- a/arch/arm/configs/da830_omapl137_defconfig
+++ /dev/null
@@ -1,1254 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2-davinci1
-# Wed May 13 15:33:29 2009
-#
-CONFIG_ARM=y
-CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-CONFIG_GENERIC_GPIO=y
-CONFIG_GENERIC_TIME=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_HAVE_LATENCYTOP_SUPPORT=y
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ZONE_DMA=y
-CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_VECTORS_BASE=0xffff0000
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_LOCK_KERNEL=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_POSIX_MQUEUE_SYSCTL=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
-
-#
-# RCU Subsystem
-#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
-# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
-# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_RELAY is not set
-# CONFIG_NAMESPACES is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_RD_GZIP=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL=y
-CONFIG_ANON_INODES=y
-CONFIG_EMBEDDED=y
-CONFIG_UID16=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-CONFIG_COMPAT_BRK=y
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-# CONFIG_SLOB is not set
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
-CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_CLK=y
-# CONFIG_SLOW_WORK is not set
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-# CONFIG_MODULE_FORCE_LOAD is not set
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_BLK_DEV_INTEGRITY is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_DEFAULT_AS=y
-# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
-# CONFIG_FREEZER is not set
-
-#
-# System Type
-#
-# CONFIG_ARCH_AAEC2000 is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_REALVIEW is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_AT91 is not set
-# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_EP93XX is not set
-# CONFIG_ARCH_GEMINI is not set
-# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_NETX is not set
-# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_IOP13XX is not set
-# CONFIG_ARCH_IOP32X is not set
-# CONFIG_ARCH_IOP33X is not set
-# CONFIG_ARCH_IXP23XX is not set
-# CONFIG_ARCH_IXP2000 is not set
-# CONFIG_ARCH_IXP4XX is not set
-# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_KIRKWOOD is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
-# CONFIG_ARCH_LOKI is not set
-# CONFIG_ARCH_MV78XX0 is not set
-# CONFIG_ARCH_MXC is not set
-# CONFIG_ARCH_ORION5X is not set
-# CONFIG_ARCH_PNX4008 is not set
-# CONFIG_ARCH_PXA is not set
-# CONFIG_ARCH_MMP is not set
-# CONFIG_ARCH_RPC is not set
-# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_S3C2410 is not set
-# CONFIG_ARCH_S3C64XX is not set
-# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-CONFIG_ARCH_DAVINCI=y
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_MSM is not set
-# CONFIG_ARCH_W90X900 is not set
-CONFIG_CP_INTC=y
-
-#
-# TI DaVinci Implementations
-#
-
-#
-# DaVinci Core Type
-#
-# CONFIG_ARCH_DAVINCI_DM644x is not set
-# CONFIG_ARCH_DAVINCI_DM646x is not set
-# CONFIG_ARCH_DAVINCI_DM355 is not set
-CONFIG_ARCH_DAVINCI_DA830=y
-
-#
-# DaVinci Board Type
-#
-CONFIG_MACH_DAVINCI_DA830_EVM=y
-CONFIG_DAVINCI_MUX=y
-# CONFIG_DAVINCI_MUX_DEBUG is not set
-# CONFIG_DAVINCI_MUX_WARNINGS is not set
-CONFIG_DAVINCI_RESET_CLOCKS=y
-
-#
-# Processor Type
-#
-CONFIG_CPU_32=y
-CONFIG_CPU_ARM926T=y
-CONFIG_CPU_32v5=y
-CONFIG_CPU_ABRT_EV5TJ=y
-CONFIG_CPU_PABRT_NOIFAR=y
-CONFIG_CPU_CACHE_VIVT=y
-CONFIG_CPU_COPY_V4WB=y
-CONFIG_CPU_TLB_V4WBI=y
-CONFIG_CPU_CP15=y
-CONFIG_CPU_CP15_MMU=y
-
-#
-# Processor Features
-#
-CONFIG_ARM_THUMB=y
-# CONFIG_CPU_ICACHE_DISABLE is not set
-# CONFIG_CPU_DCACHE_DISABLE is not set
-CONFIG_CPU_DCACHE_WRITETHROUGH=y
-# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
-# CONFIG_OUTER_CACHE is not set
-CONFIG_COMMON_CLKDEV=y
-
-#
-# Bus support
-#
-# CONFIG_PCI_SYSCALL is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCCARD is not set
-
-#
-# Kernel Features
-#
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_VMSPLIT_3G=y
-# CONFIG_VMSPLIT_2G is not set
-# CONFIG_VMSPLIT_1G is not set
-CONFIG_PAGE_OFFSET=0xC0000000
-CONFIG_PREEMPT=y
-CONFIG_HZ=100
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_ARCH_FLATMEM_HAS_HOLES=y
-# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
-# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4096
-# CONFIG_PHYS_ADDR_T_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
-CONFIG_LEDS=y
-# CONFIG_LEDS_CPU is not set
-CONFIG_ALIGNMENT_TRAP=y
-
-#
-# Boot options
-#
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE=""
-# CONFIG_XIP_KERNEL is not set
-# CONFIG_KEXEC is not set
-
-#
-# CPU Power Management
-#
-# CONFIG_CPU_IDLE is not set
-
-#
-# Floating point emulation
-#
-
-#
-# At least one emulation must be selected
-#
-# CONFIG_VFP is not set
-
-#
-# Userspace binary formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_HAVE_AOUT=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Power management options
-#
-# CONFIG_PM is not set
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-CONFIG_IPV6=m
-# CONFIG_IPV6_PRIVACY is not set
-# CONFIG_IPV6_ROUTER_PREF is not set
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
-# CONFIG_INET6_AH is not set
-# CONFIG_INET6_ESP is not set
-# CONFIG_INET6_IPCOMP is not set
-# CONFIG_IPV6_MIP6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_NDISC_NODETYPE=y
-# CONFIG_IPV6_TUNNEL is not set
-# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_IPV6_MROUTE is not set
-# CONFIG_NETWORK_SECMARK is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_NETFILTER_ADVANCED=y
-
-#
-# Core Netfilter Configuration
-#
-# CONFIG_NETFILTER_NETLINK_QUEUE is not set
-# CONFIG_NETFILTER_NETLINK_LOG is not set
-# CONFIG_NF_CONNTRACK is not set
-# CONFIG_NETFILTER_XTABLES is not set
-# CONFIG_IP_VS is not set
-
-#
-# IP: Netfilter Configuration
-#
-# CONFIG_NF_DEFRAG_IPV4 is not set
-# CONFIG_IP_NF_QUEUE is not set
-# CONFIG_IP_NF_IPTABLES is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# IPv6: Netfilter Configuration
-#
-# CONFIG_IP6_NF_QUEUE is not set
-# CONFIG_IP6_NF_IPTABLES is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_PHONET is not set
-# CONFIG_NET_SCHED is not set
-# CONFIG_DCB is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=1
-CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_BLK_DEV_XIP is not set
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_ICS932S401 is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_ISL29003 is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-CONFIG_EEPROM_AT24=y
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_EEPROM_93CX6 is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-CONFIG_SCSI=m
-CONFIG_SCSI_DMA=y
-# CONFIG_SCSI_TGT is not set
-# CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-# CONFIG_SCSI_SCAN_ASYNC is not set
-CONFIG_SCSI_WAIT_SCAN=m
-
-#
-# SCSI Transports
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-# CONFIG_SCSI_ISCSI_ATTRS is not set
-# CONFIG_SCSI_SAS_LIBSAS is not set
-# CONFIG_SCSI_SRP_ATTRS is not set
-CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_LIBFC is not set
-# CONFIG_LIBFCOE is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_DH is not set
-# CONFIG_SCSI_OSD_INITIATOR is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_MACVLAN is not set
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_VETH is not set
-CONFIG_PHYLIB=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_MARVELL_PHY is not set
-# CONFIG_DAVICOM_PHY is not set
-# CONFIG_QSEMI_PHY is not set
-CONFIG_LXT_PHY=y
-# CONFIG_CICADA_PHY is not set
-# CONFIG_VITESSE_PHY is not set
-# CONFIG_SMSC_PHY is not set
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
-# CONFIG_REALTEK_PHY is not set
-# CONFIG_NATIONAL_PHY is not set
-# CONFIG_STE10XP is not set
-CONFIG_LSI_ET1011C_PHY=y
-# CONFIG_FIXED_PHY is not set
-# CONFIG_MDIO_BITBANG is not set
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_AX88796 is not set
-# CONFIG_SMC91X is not set
-CONFIG_TI_DAVINCI_EMAC=y
-# CONFIG_DM9000 is not set
-# CONFIG_ETHOC is not set
-# CONFIG_SMC911X is not set
-# CONFIG_SMSC911X is not set
-# CONFIG_DNET is not set
-# CONFIG_IBM_NEW_EMAC_ZMII is not set
-# CONFIG_IBM_NEW_EMAC_RGMII is not set
-# CONFIG_IBM_NEW_EMAC_TAH is not set
-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
-# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
-# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
-# CONFIG_B44 is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-
-#
-# Enable WiMAX (Networking options) to see the WiMAX drivers
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-CONFIG_NETCONSOLE=y
-# CONFIG_NETCONSOLE_DYNAMIC is not set
-CONFIG_NETPOLL=y
-CONFIG_NETPOLL_TRAP=y
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_ISDN is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=m
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-CONFIG_KEYBOARD_XTKBD=m
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
-# CONFIG_TOUCHSCREEN_AD7879 is not set
-# CONFIG_TOUCHSCREEN_FUJITSU is not set
-# CONFIG_TOUCHSCREEN_GUNZE is not set
-# CONFIG_TOUCHSCREEN_ELO is not set
-# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
-# CONFIG_TOUCHSCREEN_MTOUCH is not set
-# CONFIG_TOUCHSCREEN_INEXIO is not set
-# CONFIG_TOUCHSCREEN_MK712 is not set
-# CONFIG_TOUCHSCREEN_PENMOUNT is not set
-# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
-# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
-# CONFIG_TOUCHSCREEN_TSC2007 is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_SERIO_SERPORT=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-# CONFIG_VT_CONSOLE is not set
-CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
-CONFIG_DEVKMEM=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=3
-CONFIG_SERIAL_8250_RUNTIME_UARTS=3
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_IPMI_HANDLER is not set
-CONFIG_HW_RANDOM=m
-# CONFIG_HW_RANDOM_TIMERIOMEM is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-CONFIG_I2C=y
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_HELPER_AUTO=y
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-CONFIG_I2C_DAVINCI=y
-# CONFIG_I2C_GPIO is not set
-# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_SIMTEC is not set
-
-#
-# External I2C/SMBus adapter drivers
-#
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_TAOS_EVM is not set
-
-#
-# Other I2C/SMBus bus drivers
-#
-# CONFIG_I2C_PCA_PLATFORM is not set
-# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_MAX6875 is not set
-# CONFIG_SENSORS_TSL2550 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-# CONFIG_SPI is not set
-CONFIG_ARCH_REQUIRE_GPIOLIB=y
-CONFIG_GPIOLIB=y
-# CONFIG_DEBUG_GPIO is not set
-# CONFIG_GPIO_SYSFS is not set
-
-#
-# Memory mapped GPIO expanders:
-#
-
-#
-# I2C GPIO expanders:
-#
-# CONFIG_GPIO_MAX732X is not set
-# CONFIG_GPIO_PCA953X is not set
-CONFIG_GPIO_PCF857X=m
-
-#
-# PCI GPIO expanders:
-#
-
-#
-# SPI GPIO expanders:
-#
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-
-#
-# Watchdog Device Drivers
-#
-# CONFIG_SOFT_WATCHDOG is not set
-# CONFIG_DAVINCI_WATCHDOG is not set
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_MFD_ASIC3 is not set
-# CONFIG_HTC_EGPIO is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_TWL4030_CORE is not set
-# CONFIG_MFD_TMIO is not set
-# CONFIG_MFD_T7L66XB is not set
-# CONFIG_MFD_TC6387XB is not set
-# CONFIG_MFD_TC6393XB is not set
-# CONFIG_PMIC_DA903X is not set
-# CONFIG_MFD_WM8400 is not set
-# CONFIG_MFD_WM8350_I2C is not set
-# CONFIG_MFD_PCF50633 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_SOUND=m
-# CONFIG_SOUND_OSS_CORE is not set
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_JACK=y
-# CONFIG_SND_SEQUENCER is not set
-# CONFIG_SND_MIXER_OSS is not set
-# CONFIG_SND_PCM_OSS is not set
-# CONFIG_SND_HRTIMER is not set
-# CONFIG_SND_DYNAMIC_MINORS is not set
-CONFIG_SND_SUPPORT_OLD_API=y
-CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-CONFIG_SND_DRIVERS=y
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
-CONFIG_SND_ARM=y
-CONFIG_SND_SOC=m
-CONFIG_SND_DAVINCI_SOC=m
-CONFIG_SND_SOC_I2C_AND_SPI=m
-# CONFIG_SND_SOC_ALL_CODECS is not set
-# CONFIG_SOUND_PRIME is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_USB_MUSB_HOST is not set
-# CONFIG_USB_MUSB_PERIPHERAL is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
-# CONFIG_USB_GADGET_AT91 is not set
-# CONFIG_USB_GADGET_ATMEL_USBA is not set
-# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_LH7A40X is not set
-# CONFIG_USB_GADGET_OMAP is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_IMX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_AMD5536UDC is not set
-# CONFIG_USB_GADGET_FSL_QE is not set
-# CONFIG_USB_GADGET_CI13XXX is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_GOKU is not set
-# CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_ZERO is not set
-# CONFIG_USB_ETH is not set
-# CONFIG_USB_GADGETFS is not set
-# CONFIG_USB_FILE_STORAGE is not set
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
-# CONFIG_NEW_LEDS is not set
-CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
-# CONFIG_DMADEVICES is not set
-# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
-# CONFIG_UIO is not set
-# CONFIG_STAGING is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4_FS is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
-CONFIG_XFS_FS=m
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_DEBUG is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-CONFIG_AUTOFS4_FS=m
-# CONFIG_FUSE_FS is not set
-
-#
-# Caches
-#
-# CONFIG_FSCACHE is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-CONFIG_MISC_FILESYSTEMS=y
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-CONFIG_CRAMFS=y
-# CONFIG_SQUASHFS is not set
-# CONFIG_VXFS_FS is not set
-CONFIG_MINIX_FS=m
-# CONFIG_OMFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFS_V4 is not set
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_V3_ACL is not set
-# CONFIG_NFSD_V4 is not set
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-CONFIG_SMB_FS=m
-# CONFIG_SMB_NLS_DEFAULT is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-# CONFIG_SYSV68_PARTITION is not set
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=m
-# CONFIG_DLM is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_WARN_DEPRECATED=y
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-CONFIG_TIMER_STATS=y
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-# CONFIG_SLUB_STATS is not set
-CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_PI_LIST=y
-# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
-# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-# CONFIG_PAGE_POISONING is not set
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
-# CONFIG_FUNCTION_TRACER is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_STACK_TRACER is not set
-# CONFIG_KMEMTRACE is not set
-# CONFIG_WORKQUEUE_TRACER is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-CONFIG_ARM_UNWIND=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_LL is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-# CONFIG_CRYPTO_FIPS is not set
-# CONFIG_CRYPTO_MANAGER is not set
-# CONFIG_CRYPTO_MANAGER2 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-# CONFIG_CRYPTO_AUTHENC is not set
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Authenticated Encryption with Associated Data
-#
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_SEQIV is not set
-
-#
-# Block modes
-#
-# CONFIG_CRYPTO_CBC is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_CTS is not set
-# CONFIG_CRYPTO_ECB is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_XTS is not set
-
-#
-# Hash modes
-#
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_XCBC is not set
-
-#
-# Digest
-#
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_RMD128 is not set
-# CONFIG_CRYPTO_RMD160 is not set
-# CONFIG_CRYPTO_RMD256 is not set
-# CONFIG_CRYPTO_RMD320 is not set
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_WP512 is not set
-
-#
-# Ciphers
-#
-# CONFIG_CRYPTO_AES is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_FCRYPT is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_SALSA20 is not set
-# CONFIG_CRYPTO_SEED is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-
-#
-# Compression
-#
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_ZLIB is not set
-# CONFIG_CRYPTO_LZO is not set
-
-#
-# Random Number Generation
-#
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
-# CONFIG_BINARY_PRINTF is not set
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-CONFIG_GENERIC_FIND_LAST_BIT=y
-CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
-CONFIG_CRC_T10DIF=m
-# CONFIG_CRC_ITU_T is not set
-CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_ZLIB_INFLATE=y
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_DMA=y
-CONFIG_NLATTR=y
diff --git a/arch/arm/configs/da850_omapl138_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 842a70b079b..50bd25a10f0 100644
--- a/arch/arm/configs/da850_omapl138_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -1,15 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-davinci1
-# Mon Jun 29 07:54:15 2009
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 22 12:19:19 2009
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -18,14 +16,14 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -48,11 +46,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -62,8 +61,7 @@ CONFIG_FAIR_GROUP_SCHED=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
@@ -80,7 +78,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -93,6 +90,10 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -100,12 +101,16 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -118,7 +123,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -139,19 +144,22 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
-# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -160,25 +168,27 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
# CONFIG_ARCH_KIRKWOOD is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
-# CONFIG_ARCH_MXC is not set
# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
-# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_MSM is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
CONFIG_ARCH_DAVINCI=y
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_MSM is not set
-# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_BCMRING is not set
CONFIG_CP_INTC=y
#
@@ -191,7 +201,7 @@ CONFIG_CP_INTC=y
# CONFIG_ARCH_DAVINCI_DM644x is not set
# CONFIG_ARCH_DAVINCI_DM355 is not set
# CONFIG_ARCH_DAVINCI_DM646x is not set
-# CONFIG_ARCH_DAVINCI_DA830 is not set
+CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
CONFIG_ARCH_DAVINCI_DA8XX=y
# CONFIG_ARCH_DAVINCI_DM365 is not set
@@ -199,7 +209,14 @@ CONFIG_ARCH_DAVINCI_DA8XX=y
#
# DaVinci Board Type
#
+CONFIG_MACH_DAVINCI_DA830_EVM=y
+CONFIG_DA830_UI=y
+CONFIG_DA830_UI_LCD=y
+# CONFIG_DA830_UI_NAND is not set
CONFIG_MACH_DAVINCI_DA850_EVM=y
+CONFIG_DA850_UI_EXP=y
+CONFIG_DA850_UI_NONE=y
+# CONFIG_DA850_UI_RMII is not set
CONFIG_DAVINCI_MUX=y
# CONFIG_DAVINCI_MUX_DEBUG is not set
# CONFIG_DAVINCI_MUX_WARNINGS is not set
@@ -212,7 +229,7 @@ CONFIG_CPU_32=y
CONFIG_CPU_ARM926T=y
CONFIG_CPU_32v5=y
CONFIG_CPU_ABRT_EV5TJ=y
-CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_PABRT_LEGACY=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_COPY_V4WB=y
CONFIG_CPU_TLB_V4WBI=y
@@ -225,9 +242,9 @@ CONFIG_CPU_CP15_MMU=y
CONFIG_ARM_THUMB=y
# CONFIG_CPU_ICACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_DISABLE is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+CONFIG_CPU_DCACHE_WRITETHROUGH=y
# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
-# CONFIG_OUTER_CACHE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
CONFIG_COMMON_CLKDEV=y
#
@@ -248,11 +265,12 @@ CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_HZ=100
CONFIG_AEABI=y
# CONFIG_OABI_COMPAT is not set
-# CONFIG_ARCH_HAS_HOLES_MEMORYMODEL is not set
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
@@ -268,12 +286,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_LEDS=y
# CONFIG_LEDS_CPU is not set
CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
#
# Boot options
@@ -287,7 +307,24 @@ CONFIG_CMDLINE=""
#
# CPU Power Management
#
-# CONFIG_CPU_IDLE is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=m
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
#
# Floating point emulation
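With CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y the board boots with the userspace governor, so the operating frequency is picked from user space through the standard cpufreq sysfs files. A minimal user-space sketch of driving it (the write_str helper and the 300000 kHz value are only illustrative; use a frequency listed in scaling_available_frequencies):

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Make sure the userspace governor is active, then request 300 MHz. */
	write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
		  "userspace\n");
	return write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed",
			 "300000\n");
}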
@@ -401,6 +438,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_IP6_NF_IPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -415,6 +453,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -440,6 +479,7 @@ CONFIG_NETFILTER_ADVANCED=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -460,6 +500,7 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
@@ -471,6 +512,7 @@ CONFIG_MISC_DEVICES=y
#
CONFIG_EEPROM_AT24=y
# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -494,10 +536,6 @@ CONFIG_BLK_DEV_SD=m
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -522,7 +560,6 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -553,7 +590,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
# CONFIG_SMC91X is not set
-# CONFIG_TI_DAVINCI_EMAC is not set
+CONFIG_TI_DAVINCI_EMAC=y
# CONFIG_DM9000 is not set
# CONFIG_ETHOC is not set
# CONFIG_SMC911X is not set
@@ -567,12 +604,11 @@ CONFIG_MII=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -588,6 +624,7 @@ CONFIG_NETPOLL=y
CONFIG_NETPOLL_TRAP=y
CONFIG_NET_POLL_CONTROLLER=y
# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -611,23 +648,30 @@ CONFIG_INPUT_EVBUG=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=m
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-CONFIG_KEYBOARD_XTKBD=m
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_XTKBD=m
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -636,6 +680,7 @@ CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -684,6 +729,7 @@ CONFIG_HW_RANDOM=m
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -695,6 +741,7 @@ CONFIG_I2C_HELPER_AUTO=y
# I2C system bus drivers (mostly embedded / system-on-chip)
#
CONFIG_I2C_DAVINCI=y
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -715,14 +762,17 @@ CONFIG_I2C_DAVINCI=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -736,8 +786,8 @@ CONFIG_GPIOLIB=y
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX732X is not set
-# CONFIG_GPIO_PCA953X is not set
-CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCF857X=y
#
# PCI GPIO expanders:
@@ -746,11 +796,14 @@ CONFIG_GPIO_PCF857X=m
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -782,31 +835,56 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_AB3100_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+CONFIG_REGULATOR_TPS6507X=y
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_DAVINCI is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_DA8XX=y
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -819,6 +897,16 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=m
# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=m
@@ -834,6 +922,11 @@ CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_DRIVERS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_MTPAV is not set
@@ -842,6 +935,8 @@ CONFIG_SND_DRIVERS=y
CONFIG_SND_ARM=y
CONFIG_SND_SOC=m
CONFIG_SND_DAVINCI_SOC=m
+# CONFIG_SND_DA830_SOC_EVM is not set
+# CONFIG_SND_DA850_SOC_EVM is not set
CONFIG_SND_SOC_I2C_AND_SPI=m
# CONFIG_SND_SOC_ALL_CODECS is not set
# CONFIG_SOUND_PRIME is not set
@@ -849,14 +944,17 @@ CONFIG_SND_SOC_I2C_AND_SPI=m
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -877,14 +975,17 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
CONFIG_XFS_FS=m
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_POSIX_ACL is not set
# CONFIG_XFS_RT is not set
# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -943,7 +1044,6 @@ CONFIG_MINIX_FS=m
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1039,6 +1139,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1056,6 +1157,7 @@ CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_PI_LIST=y
@@ -1076,29 +1178,29 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1125,7 +1227,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1157,11 +1258,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index ddffe39d9f8..bd656e8e6e4 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -1,14 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc3-davinci1
-# Fri Jul 17 08:26:52 2009
+# Linux kernel version: 2.6.32-rc4
+# Mon Oct 12 14:13:12 2009
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -46,11 +45,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -91,17 +91,15 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
@@ -145,6 +143,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
@@ -159,6 +158,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -181,11 +181,13 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
CONFIG_ARCH_DAVINCI=y
# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
CONFIG_AINTC=y
CONFIG_ARCH_DAVINCI_DMx=y
@@ -208,6 +210,7 @@ CONFIG_ARCH_DAVINCI_DM365=y
#
CONFIG_MACH_DAVINCI_EVM=y
CONFIG_MACH_SFFSDR=y
+CONFIG_MACH_NEUROS_OSD2=y
CONFIG_MACH_DAVINCI_DM355_EVM=y
CONFIG_MACH_DM355_LEOPARD=y
CONFIG_MACH_DAVINCI_DM6467_EVM=y
@@ -224,7 +227,7 @@ CONFIG_CPU_32=y
CONFIG_CPU_ARM926T=y
CONFIG_CPU_32v5=y
CONFIG_CPU_ABRT_EV5TJ=y
-CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_PABRT_LEGACY=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_COPY_V4WB=y
CONFIG_CPU_TLB_V4WBI=y
@@ -239,6 +242,7 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
CONFIG_COMMON_CLKDEV=y
#
@@ -259,6 +263,8 @@ CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_HZ=100
CONFIG_AEABI=y
@@ -280,6 +286,7 @@ CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_LEDS=y
# CONFIG_LEDS_CPU is not set
@@ -412,6 +419,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_IP6_NF_IPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -452,6 +460,7 @@ CONFIG_NETFILTER_ADVANCED=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -461,9 +470,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_AFS_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -499,7 +508,7 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
# CONFIG_MTD_CFI_STAA is not set
CONFIG_MTD_CFI_UTIL=m
@@ -694,12 +703,10 @@ CONFIG_DM9000_DEBUGLEVEL=4
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -734,6 +741,7 @@ CONFIG_NETPOLL=y
CONFIG_NETPOLL_TRAP=y
CONFIG_NET_POLL_CONTROLLER=y
# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -745,10 +753,7 @@ CONFIG_INPUT=y
#
# Userland interfaces
#
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
@@ -757,12 +762,16 @@ CONFIG_INPUT_EVBUG=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=m
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_XTKBD=m
@@ -777,6 +786,7 @@ CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -787,7 +797,17 @@ CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
# CONFIG_TOUCHSCREEN_TSC2007 is not set
# CONFIG_TOUCHSCREEN_W90X900 is not set
-# CONFIG_INPUT_MISC is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+CONFIG_INPUT_DM355EVM=m
+CONFIG_INPUT_DM365EVM=m
#
# Hardware I/O ports
@@ -828,13 +848,13 @@ CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
-CONFIG_HW_RANDOM=m
-# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -868,13 +888,17 @@ CONFIG_I2C_DAVINCI=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -889,7 +913,7 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
-CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_PCF857X=y
#
# PCI GPIO expanders:
@@ -898,10 +922,19 @@ CONFIG_GPIO_PCF857X=m
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
@@ -950,6 +983,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
@@ -959,9 +993,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -988,7 +1020,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
-# CONFIG_MFD_DM355EVM_MSP is not set
+CONFIG_MFD_DM355EVM_MSP=y
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_TPS65010 is not set
@@ -999,9 +1031,11 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -1013,9 +1047,9 @@ CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
# CONFIG_FB_DDC is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
-# CONFIG_FB_CFB_FILLRECT is not set
-# CONFIG_FB_CFB_COPYAREA is not set
-# CONFIG_FB_CFB_IMAGEBLIT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
@@ -1032,6 +1066,7 @@ CONFIG_FIRMWARE_EDID=y
# Frame buffer hardware drivers
#
# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_DAVINCI=y
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
@@ -1101,7 +1136,6 @@ CONFIG_SND_SOC_TLV320AIC3X=m
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=m
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1130,6 +1164,7 @@ CONFIG_HID_CYPRESS=m
CONFIG_HID_EZKEY=m
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=m
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
@@ -1176,6 +1211,7 @@ CONFIG_USB_MON=m
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
@@ -1269,6 +1305,7 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
# CONFIG_USB_GADGET_S3C_HSOTG is not set
# CONFIG_USB_GADGET_IMX is not set
@@ -1286,6 +1323,7 @@ CONFIG_USB_ZERO=m
# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -1316,8 +1354,10 @@ CONFIG_MMC_BLOCK=m
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+CONFIG_MMC_DAVINCI=m
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
@@ -1345,6 +1385,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
#
# iptables trigger is under Netfilter config (LED target)
#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=m
@@ -1370,6 +1411,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_DM355EVM is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -1399,8 +1441,11 @@ CONFIG_RTC_INTF_DEV=y
#
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -1429,6 +1474,7 @@ CONFIG_XFS_FS=m
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1500,7 +1546,6 @@ CONFIG_MINIX_FS=m
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1596,6 +1641,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1634,11 +1680,14 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
@@ -1663,7 +1712,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-CONFIG_ARM_UNWIND=y
+# CONFIG_ARM_UNWIND is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
@@ -1681,7 +1730,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1713,11 +1761,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 40866c643f1..033bfede6b6 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -32,11 +32,13 @@ config ARCH_DAVINCI_DA830
bool "DA830/OMAP-L137 based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
+ select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138 based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
+ select ARCH_HAS_CPUFREQ
config ARCH_DAVINCI_DA8XX
bool
@@ -63,6 +65,13 @@ config MACH_SFFSDR
Say Y here to select the Lyrtech Small Form Factor
Software Defined Radio (SFFSDR) board.
+config MACH_NEUROS_OSD2
+ bool "Neuros OSD2 Open Television Set Top Box"
+ depends on ARCH_DAVINCI_DM644x
+ help
+ Configure this option to specify whether the board used
+ for development is a Neuros OSD2 Open Set Top Box.
+
config MACH_DAVINCI_DM355_EVM
bool "TI DM355 EVM"
default ARCH_DAVINCI_DM355
@@ -98,16 +107,66 @@ config MACH_DAVINCI_DA830_EVM
bool "TI DA830/OMAP-L137 Reference Platform"
default ARCH_DAVINCI_DA830
depends on ARCH_DAVINCI_DA830
+ select GPIO_PCF857X
help
Say Y here to select the TI DA830/OMAP-L137 Evaluation Module.
+choice
+ prompt "Select DA830/OMAP-L137 UI board peripheral"
+ depends on MACH_DAVINCI_DA830_EVM
+ help
+ The presence of the UI card on the DA830/OMAP-L137 EVM is detected
+ automatically by a successful probe of the I2C-based GPIO
+ expander on that board. The option selected in this menu takes
+ effect only if the UI card is detected.
+
+config DA830_UI_LCD
+ bool "LCD"
+ help
+ Say Y here to use the LCD as a framebuffer or simple character
+ display.
+
+config DA830_UI_NAND
+ bool "NAND flash"
+ help
+ Say Y here to use the NAND flash. Do not forget to set up
+ the switch correctly.
+endchoice
+
config MACH_DAVINCI_DA850_EVM
bool "TI DA850/OMAP-L138 Reference Platform"
default ARCH_DAVINCI_DA850
depends on ARCH_DAVINCI_DA850
+ select GPIO_PCA953X
help
Say Y here to select the TI DA850/OMAP-L138 Evaluation Module.
+choice
+ prompt "Select peripherals connected to expander on UI board"
+ depends on MACH_DAVINCI_DA850_EVM
+ help
+ The presence of the User Interface (UI) card on the DA850/OMAP-L138
+ EVM is detected automatically by a successful probe of the I2C-based
+ GPIO expander on that card. The option selected in this menu takes
+ effect only if the UI card is detected.
+
+config DA850_UI_NONE
+ bool "No peripheral is enabled"
+ help
+ Say Y if you do not want to enable any of the peripherals connected
+ to the TCA6416 expander on the DA850/OMAP-L138 EVM UI card.
+
+config DA850_UI_RMII
+ bool "RMII Ethernet PHY"
+ help
+ Say Y if you want to use the RMII PHY on the DA850/OMAP-L138 EVM.
+ This PHY is found on the UI daughter card that is supplied with
+ the EVM.
+ NOTE: Please take care while choosing this option; the MII PHY
+ will not be functional if RMII mode is selected.
+
+endchoice
+
config DAVINCI_MUX
bool "DAVINCI multiplexing support"
depends on ARCH_DAVINCI
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 2e11e847313..eeb9230d884 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -23,9 +23,14 @@ obj-$(CONFIG_CP_INTC) += cp_intc.o
# Board specific
obj-$(CONFIG_MACH_DAVINCI_EVM) += board-dm644x-evm.o
obj-$(CONFIG_MACH_SFFSDR) += board-sffsdr.o
+obj-$(CONFIG_MACH_NEUROS_OSD2) += board-neuros-osd2.o
obj-$(CONFIG_MACH_DAVINCI_DM355_EVM) += board-dm355-evm.o
obj-$(CONFIG_MACH_DM355_LEOPARD) += board-dm355-leopard.o
obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM) += board-dm646x-evm.o
obj-$(CONFIG_MACH_DAVINCI_DM365_EVM) += board-dm365-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA830_EVM) += board-da830-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
+
+# Power Management
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index bfbb63936f3..31dc9901e55 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -10,51 +10,194 @@
* or implied.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c/pcf857x.h>
#include <linux/i2c/at24.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <mach/common.h>
-#include <mach/irqs.h>
#include <mach/cp_intc.h>
+#include <mach/mux.h>
+#include <mach/nand.h>
#include <mach/da8xx.h>
-#include <mach/asp.h>
+#include <mach/usb.h>
#define DA830_EVM_PHY_MASK 0x0
#define DA830_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-static struct at24_platform_data da830_evm_i2c_eeprom_info = {
- .byte_len = SZ_256K / 8,
- .page_size = 64,
- .flags = AT24_FLAG_ADDR16,
- .setup = davinci_get_mac_addr,
- .context = (void *)0x7f00,
-};
+#define DA830_EMIF25_ASYNC_DATA_CE3_BASE 0x62000000
+#define DA830_EMIF25_CONTROL_BASE 0x68000000
-static struct i2c_board_info __initdata da830_evm_i2c_devices[] = {
- {
- I2C_BOARD_INFO("24c256", 0x50),
- .platform_data = &da830_evm_i2c_eeprom_info,
- },
- {
- I2C_BOARD_INFO("tlv320aic3x", 0x18),
- }
+/*
+ * USB1 VBUS is controlled by GPIO1[15]; over-current is reported on GPIO2[4].
+ */
+#define ON_BD_USB_DRV GPIO_TO_PIN(1, 15)
+#define ON_BD_USB_OVC GPIO_TO_PIN(2, 4)
+
+static const short da830_evm_usb11_pins[] = {
+ DA830_GPIO1_15, DA830_GPIO2_4,
+ -1
};
-static struct davinci_i2c_platform_data da830_evm_i2c_0_pdata = {
- .bus_freq = 100, /* kHz */
- .bus_delay = 0, /* usec */
+static da8xx_ocic_handler_t da830_evm_usb_ocic_handler;
+
+static int da830_evm_usb_set_power(unsigned port, int on)
+{
+ gpio_set_value(ON_BD_USB_DRV, on);
+ return 0;
+}
+
+static int da830_evm_usb_get_power(unsigned port)
+{
+ return gpio_get_value(ON_BD_USB_DRV);
+}
+
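+/* The over-current input is active-low: report an over-current condition when the GPIO reads 0. */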
+static int da830_evm_usb_get_oci(unsigned port)
+{
+ return !gpio_get_value(ON_BD_USB_OVC);
+}
+
+static irqreturn_t da830_evm_usb_ocic_irq(int, void *);
+
+static int da830_evm_usb_ocic_notify(da8xx_ocic_handler_t handler)
+{
+ int irq = gpio_to_irq(ON_BD_USB_OVC);
+ int error = 0;
+
+ if (handler != NULL) {
+ da830_evm_usb_ocic_handler = handler;
+
+ error = request_irq(irq, da830_evm_usb_ocic_irq, IRQF_DISABLED |
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "OHCI over-current indicator", NULL);
+ if (error)
+ printk(KERN_ERR "%s: could not request IRQ to watch "
+ "over-current indicator changes\n", __func__);
+ } else
+ free_irq(irq, NULL);
+
+ return error;
+}
+
+static struct da8xx_ohci_root_hub da830_evm_usb11_pdata = {
+ .set_power = da830_evm_usb_set_power,
+ .get_power = da830_evm_usb_get_power,
+ .get_oci = da830_evm_usb_get_oci,
+ .ocic_notify = da830_evm_usb_ocic_notify,
+
+ /* TPS2065 switch @ 5V */
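+ /* potpgt is in 2 ms units; (3 + 1) / 2 rounds the 3 ms power-on-to-power-good time up to 4 ms */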
+ .potpgt = (3 + 1) / 2, /* 3 ms max */
};
+static irqreturn_t da830_evm_usb_ocic_irq(int irq, void *dev_id)
+{
+ da830_evm_usb_ocic_handler(&da830_evm_usb11_pdata, 1);
+ return IRQ_HANDLED;
+}
+
+static __init void da830_evm_usb_init(void)
+{
+ u32 cfgchip2;
+ int ret;
+
+ /*
+ * Set up USB clock/mode in the CFGCHIP2 register.
+ * FYI: CFGCHIP2 is 0x0000ef00 initially.
+ */
+ cfgchip2 = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG));
+
+ /* USB2.0 PHY reference clock is 24 MHz */
+ cfgchip2 &= ~CFGCHIP2_REFFREQ;
+ cfgchip2 |= CFGCHIP2_REFFREQ_24MHZ;
+
+ /*
+ * Select internal reference clock for USB 2.0 PHY
+ * and use it as a clock source for USB 1.1 PHY
+ * (this is the default setting anyway).
+ */
+ cfgchip2 &= ~CFGCHIP2_USB1PHYCLKMUX;
+ cfgchip2 |= CFGCHIP2_USB2PHYCLKMUX;
+
+ /*
+ * We have to override VBUS/ID signals when MUSB is configured into the
+ * host-only mode -- ID pin will float if no cable is connected, so the
+ * controller won't be able to drive VBUS thinking that it's a B-device.
+ * Otherwise, we want to use the OTG mode and enable VBUS comparators.
+ */
+ cfgchip2 &= ~CFGCHIP2_OTGMODE;
+#ifdef CONFIG_USB_MUSB_HOST
+ cfgchip2 |= CFGCHIP2_FORCE_HOST;
+#else
+ cfgchip2 |= CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN;
+#endif
+
+ __raw_writel(cfgchip2, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG));
+
+ /* USB_REFCLKIN is not used. */
+ ret = davinci_cfg_reg(DA830_USB0_DRVVBUS);
+ if (ret)
+ pr_warning("%s: USB 2.0 PinMux setup failed: %d\n",
+ __func__, ret);
+ else {
+ /*
+ * TPS2065 switch @ 5V supplies 1 A (sustains 1.5 A),
+ * with the power on to power good time of 3 ms.
+ */
+ ret = da8xx_register_usb20(1000, 3);
+ if (ret)
+ pr_warning("%s: USB 2.0 registration failed: %d\n",
+ __func__, ret);
+ }
+
+ ret = da8xx_pinmux_setup(da830_evm_usb11_pins);
+ if (ret) {
+ pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ ret = gpio_request(ON_BD_USB_DRV, "ON_BD_USB_DRV");
+ if (ret) {
+ printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
+ "power control: %d\n", __func__, ret);
+ return;
+ }
+ gpio_direction_output(ON_BD_USB_DRV, 0);
+
+ ret = gpio_request(ON_BD_USB_OVC, "ON_BD_USB_OVC");
+ if (ret) {
+ printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
+ "over-current indicator: %d\n", __func__, ret);
+ return;
+ }
+ gpio_direction_input(ON_BD_USB_OVC);
+
+ ret = da8xx_register_usb11(&da830_evm_usb11_pdata);
+ if (ret)
+ pr_warning("%s: USB 1.1 registration failed: %d\n",
+ __func__, ret);
+}
+
static struct davinci_uart_config da830_evm_uart_config __initdata = {
.enabled_uarts = 0x7,
};
+static const short da830_evm_mcasp1_pins[] = {
+ DA830_AHCLKX1, DA830_ACLKX1, DA830_AFSX1, DA830_AHCLKR1, DA830_AFSR1,
+ DA830_AMUTE1, DA830_AXR1_0, DA830_AXR1_1, DA830_AXR1_2, DA830_AXR1_5,
+ DA830_ACLKR1, DA830_AXR1_6, DA830_AXR1_7, DA830_AXR1_8, DA830_AXR1_10,
+ DA830_AXR1_11,
+ -1
+};
+
static u8 da830_iis_serializer_direction[] = {
RX_MODE, INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE,
INACTIVE_MODE, TX_MODE, INACTIVE_MODE, INACTIVE_MODE,
@@ -74,6 +217,271 @@ static struct snd_platform_data da830_evm_snd_data = {
.rxnumevt = 1,
};
+/*
+ * GPIO2[1] is used as MMC_SD_WP and GPIO2[2] as MMC_SD_INS.
+ */
+static const short da830_evm_mmc_sd_pins[] = {
+ DA830_MMCSD_DAT_0, DA830_MMCSD_DAT_1, DA830_MMCSD_DAT_2,
+ DA830_MMCSD_DAT_3, DA830_MMCSD_DAT_4, DA830_MMCSD_DAT_5,
+ DA830_MMCSD_DAT_6, DA830_MMCSD_DAT_7, DA830_MMCSD_CLK,
+ DA830_MMCSD_CMD, DA830_GPIO2_1, DA830_GPIO2_2,
+ -1
+};
+
+#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
+
+static int da830_evm_mmc_get_ro(int index)
+{
+ return gpio_get_value(DA830_MMCSD_WP_PIN);
+}
+
+static struct davinci_mmc_config da830_evm_mmc_config = {
+ .get_ro = da830_evm_mmc_get_ro,
+ .wires = 4,
+ .max_freq = 50000000,
+ .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
+ .version = MMC_CTLR_VERSION_2,
+};
+
+static inline void da830_evm_init_mmc(void)
+{
+ int ret;
+
+ ret = da8xx_pinmux_setup(da830_evm_mmc_sd_pins);
+ if (ret) {
+ pr_warning("da830_evm_init: mmc/sd mux setup failed: %d\n",
+ ret);
+ return;
+ }
+
+ ret = gpio_request(DA830_MMCSD_WP_PIN, "MMC WP");
+ if (ret) {
+ pr_warning("da830_evm_init: can not open GPIO %d\n",
+ DA830_MMCSD_WP_PIN);
+ return;
+ }
+ gpio_direction_input(DA830_MMCSD_WP_PIN);
+
+ ret = da8xx_register_mmcsd0(&da830_evm_mmc_config);
+ if (ret) {
+ pr_warning("da830_evm_init: mmc/sd registration failed: %d\n",
+ ret);
+ gpio_free(DA830_MMCSD_WP_PIN);
+ }
+}
+
+/*
+ * UI board NAND/NOR flashes only use 8-bit data bus.
+ */
+static const short da830_evm_emif25_pins[] = {
+ DA830_EMA_D_0, DA830_EMA_D_1, DA830_EMA_D_2, DA830_EMA_D_3,
+ DA830_EMA_D_4, DA830_EMA_D_5, DA830_EMA_D_6, DA830_EMA_D_7,
+ DA830_EMA_A_0, DA830_EMA_A_1, DA830_EMA_A_2, DA830_EMA_A_3,
+ DA830_EMA_A_4, DA830_EMA_A_5, DA830_EMA_A_6, DA830_EMA_A_7,
+ DA830_EMA_A_8, DA830_EMA_A_9, DA830_EMA_A_10, DA830_EMA_A_11,
+ DA830_EMA_A_12, DA830_EMA_BA_0, DA830_EMA_BA_1, DA830_NEMA_WE,
+ DA830_NEMA_CS_2, DA830_NEMA_CS_3, DA830_NEMA_OE, DA830_EMA_WAIT_0,
+ -1
+};
+
+#if defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
+#define HAS_MMC 1
+#else
+#define HAS_MMC 0
+#endif
+
+#ifdef CONFIG_DA830_UI_NAND
+static struct mtd_partition da830_evm_nand_partitions[] = {
+ /* bootloader (U-Boot, etc) in first sector */
+ [0] = {
+ .name = "bootloader",
+ .offset = 0,
+ .size = SZ_128K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ /* bootloader params in the next sector */
+ [1] = {
+ .name = "params",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_128K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ /* kernel */
+ [2] = {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_2M,
+ .mask_flags = 0,
+ },
+ /* file system */
+ [3] = {
+ .name = "filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ }
+};
+
+/* flash bbt descriptors */
+static uint8_t da830_evm_nand_bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t da830_evm_nand_mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr da830_evm_nand_bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
+ NAND_BBT_WRITE | NAND_BBT_2BIT |
+ NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 2,
+ .len = 4,
+ .veroffs = 16,
+ .maxblocks = 4,
+ .pattern = da830_evm_nand_bbt_pattern
+};
+
+static struct nand_bbt_descr da830_evm_nand_bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
+ NAND_BBT_WRITE | NAND_BBT_2BIT |
+ NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 2,
+ .len = 4,
+ .veroffs = 16,
+ .maxblocks = 4,
+ .pattern = da830_evm_nand_mirror_pattern
+};
+
+static struct davinci_nand_pdata da830_evm_nand_pdata = {
+ .parts = da830_evm_nand_partitions,
+ .nr_parts = ARRAY_SIZE(da830_evm_nand_partitions),
+ .ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 4,
+ .options = NAND_USE_FLASH_BBT,
+ .bbt_td = &da830_evm_nand_bbt_main_descr,
+ .bbt_md = &da830_evm_nand_bbt_mirror_descr,
+};
+
+static struct resource da830_evm_nand_resources[] = {
+ [0] = { /* First memory resource is NAND I/O window */
+ .start = DA830_EMIF25_ASYNC_DATA_CE3_BASE,
+ .end = DA830_EMIF25_ASYNC_DATA_CE3_BASE + PAGE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* Second memory resource is AEMIF control registers */
+ .start = DA830_EMIF25_CONTROL_BASE,
+ .end = DA830_EMIF25_CONTROL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device da830_evm_nand_device = {
+ .name = "davinci_nand",
+ .id = 1,
+ .dev = {
+ .platform_data = &da830_evm_nand_pdata,
+ },
+ .num_resources = ARRAY_SIZE(da830_evm_nand_resources),
+ .resource = da830_evm_nand_resources,
+};
+
+static inline void da830_evm_init_nand(int mux_mode)
+{
+ int ret;
+
+ if (HAS_MMC) {
+ pr_warning("WARNING: both MMC/SD and NAND are "
+ "enabled, but they share AEMIF pins.\n"
+ "\tDisable MMC/SD for NAND support.\n");
+ return;
+ }
+
+ ret = da8xx_pinmux_setup(da830_evm_emif25_pins);
+ if (ret)
+ pr_warning("da830_evm_init: emif25 mux setup failed: %d\n",
+ ret);
+
+ ret = platform_device_register(&da830_evm_nand_device);
+ if (ret)
+ pr_warning("da830_evm_init: NAND device not registered.\n");
+
+ gpio_direction_output(mux_mode, 1);
+}
+#else
+static inline void da830_evm_init_nand(int mux_mode) { }
+#endif
+
+#ifdef CONFIG_DA830_UI_LCD
+static inline void da830_evm_init_lcdc(int mux_mode)
+{
+ int ret;
+
+ ret = da8xx_pinmux_setup(da830_lcdcntl_pins);
+ if (ret)
+ pr_warning("da830_evm_init: lcdcntl mux setup failed: %d\n",
+ ret);
+
+ ret = da8xx_register_lcdc(&sharp_lcd035q3dg01_pdata);
+ if (ret)
+ pr_warning("da830_evm_init: lcd setup failed: %d\n", ret);
+
+ gpio_direction_output(mux_mode, 0);
+}
+#else
+static inline void da830_evm_init_lcdc(int mux_mode) { }
+#endif
+
+static struct at24_platform_data da830_evm_i2c_eeprom_info = {
+ .byte_len = SZ_256K / 8,
+ .page_size = 64,
+ .flags = AT24_FLAG_ADDR16,
+ .setup = davinci_get_mac_addr,
+ .context = (void *)0x7f00,
+};
+
+static int __init da830_evm_ui_expander_setup(struct i2c_client *client,
+ int gpio, unsigned ngpio, void *context)
+{
+ gpio_request(gpio + 6, "UI MUX_MODE");
+
+ /* Drive mux mode low to match the default without UI card */
+ gpio_direction_output(gpio + 6, 0);
+
+ da830_evm_init_lcdc(gpio + 6);
+
+ da830_evm_init_nand(gpio + 6);
+
+ return 0;
+}
+
+static int da830_evm_ui_expander_teardown(struct i2c_client *client, int gpio,
+ unsigned ngpio, void *context)
+{
+ gpio_free(gpio + 6);
+ return 0;
+}
+
+static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = {
+ .gpio_base = DAVINCI_N_GPIO,
+ .setup = da830_evm_ui_expander_setup,
+ .teardown = da830_evm_ui_expander_teardown,
+};
+
+static struct i2c_board_info __initdata da830_evm_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("24c256", 0x50),
+ .platform_data = &da830_evm_i2c_eeprom_info,
+ },
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ },
+ {
+ I2C_BOARD_INFO("pcf8574", 0x3f),
+ .platform_data = &da830_evm_ui_expander_info,
+ },
+};
+
+static struct davinci_i2c_platform_data da830_evm_i2c_0_pdata = {
+ .bus_freq = 100, /* kHz */
+ .bus_delay = 0, /* usec */
+};
+
static __init void da830_evm_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -94,6 +502,8 @@ static __init void da830_evm_init(void)
pr_warning("da830_evm_init: i2c0 registration failed: %d\n",
ret);
+ da830_evm_usb_init();
+
soc_info->emac_pdata->phy_mask = DA830_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DA830_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->rmii_en = 1;
@@ -117,12 +527,18 @@ static __init void da830_evm_init(void)
i2c_register_board_info(1, da830_evm_i2c_devices,
ARRAY_SIZE(da830_evm_i2c_devices));
- ret = da8xx_pinmux_setup(da830_mcasp1_pins);
+ ret = da8xx_pinmux_setup(da830_evm_mcasp1_pins);
if (ret)
pr_warning("da830_evm_init: mcasp1 mux setup failed: %d\n",
ret);
- da8xx_init_mcasp(1, &da830_evm_snd_data);
+ da8xx_register_mcasp(1, &da830_evm_snd_data);
+
+ da830_evm_init_mmc();
+
+ ret = da8xx_register_rtc();
+ if (ret)
+ pr_warning("da830_evm_init: rtc setup failed: %d\n", ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -146,7 +562,7 @@ static void __init da830_evm_map_io(void)
da830_init();
}
-MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP L137 EVM")
+MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137 EVM")
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DA8XX_DDR_BASE + 0x100),
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index c759d72494e..62b98bffc15 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -12,36 +12,38 @@
* or implied.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
+#include <linux/i2c/pca953x.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <mach/common.h>
-#include <mach/irqs.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#include <mach/nand.h>
+#include <mach/mux.h>
#define DA850_EVM_PHY_MASK 0x1
#define DA850_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
+#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
-#define DA850_LCD_PWR_PIN GPIO_TO_PIN(8, 10)
#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0)
#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1)
+#define DA850_MII_MDIO_CLKEN_PIN GPIO_TO_PIN(2, 6)
+
static struct mtd_partition da850_evm_norflash_partition[] = {
{
.name = "NOR filesystem",
@@ -143,10 +145,149 @@ static struct platform_device da850_evm_nandflash_device = {
.resource = da850_evm_nandflash_resource,
};
+static struct platform_device *da850_evm_devices[] __initdata = {
+ &da850_evm_nandflash_device,
+ &da850_evm_norflash_device,
+};
+
+#define DA8XX_AEMIF_CE2CFG_OFFSET 0x10
+#define DA8XX_AEMIF_ASIZE_16BIT 0x1
+
+static void __init da850_evm_init_nor(void)
+{
+ void __iomem *aemif_addr;
+
+ aemif_addr = ioremap(DA8XX_AEMIF_CTL_BASE, SZ_32K);
+
+ /* Configure data bus width of CS2 to 16 bit */
+ writel(readl(aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET) |
+ DA8XX_AEMIF_ASIZE_16BIT,
+ aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET);
+
+ iounmap(aemif_addr);
+}
+
+static u32 ui_card_detected;
+
+#if defined(CONFIG_MMC_DAVINCI) || \
+ defined(CONFIG_MMC_DAVINCI_MODULE)
+#define HAS_MMC 1
+#else
+#define HAS_MMC 0
+#endif
+
+static __init void da850_evm_setup_nor_nand(void)
+{
+ int ret = 0;
+
+ if (ui_card_detected & !HAS_MMC) {
+ ret = da8xx_pinmux_setup(da850_nand_pins);
+ if (ret)
+ pr_warning("da850_evm_init: nand mux setup failed: "
+ "%d\n", ret);
+
+ ret = da8xx_pinmux_setup(da850_nor_pins);
+ if (ret)
+ pr_warning("da850_evm_init: nor mux setup failed: %d\n",
+ ret);
+
+ da850_evm_init_nor();
+
+ platform_add_devices(da850_evm_devices,
+ ARRAY_SIZE(da850_evm_devices));
+ }
+}
+
+#ifdef CONFIG_DA850_UI_RMII
+static inline void da850_evm_setup_emac_rmii(int rmii_sel)
+{
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+
+ soc_info->emac_pdata->rmii_en = 1;
+ gpio_set_value(rmii_sel, 0);
+}
+#else
+static inline void da850_evm_setup_emac_rmii(int rmii_sel) { }
+#endif
+
+static int da850_evm_ui_expander_setup(struct i2c_client *client, unsigned gpio,
+ unsigned ngpio, void *c)
+{
+ int sel_a, sel_b, sel_c, ret;
+
+ sel_a = gpio + 7;
+ sel_b = gpio + 6;
+ sel_c = gpio + 5;
+
+ ret = gpio_request(sel_a, "sel_a");
+ if (ret) {
+ pr_warning("Cannot open UI expander pin %d\n", sel_a);
+ goto exp_setup_sela_fail;
+ }
+
+ ret = gpio_request(sel_b, "sel_b");
+ if (ret) {
+ pr_warning("Cannot open UI expander pin %d\n", sel_b);
+ goto exp_setup_selb_fail;
+ }
+
+ ret = gpio_request(sel_c, "sel_c");
+ if (ret) {
+ pr_warning("Cannot open UI expander pin %d\n", sel_c);
+ goto exp_setup_selc_fail;
+ }
+
+ /* deselect all functionalities */
+ gpio_direction_output(sel_a, 1);
+ gpio_direction_output(sel_b, 1);
+ gpio_direction_output(sel_c, 1);
+
+ ui_card_detected = 1;
+ pr_info("DA850/OMAP-L138 EVM UI card detected\n");
+
+ da850_evm_setup_nor_nand();
+
+ da850_evm_setup_emac_rmii(sel_a);
+
+ return 0;
+
+exp_setup_selc_fail:
+ gpio_free(sel_b);
+exp_setup_selb_fail:
+ gpio_free(sel_a);
+exp_setup_sela_fail:
+ return ret;
+}
+
+static int da850_evm_ui_expander_teardown(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio, void *c)
+{
+ /* deselect all functionalities */
+ gpio_set_value(gpio + 5, 1);
+ gpio_set_value(gpio + 6, 1);
+ gpio_set_value(gpio + 7, 1);
+
+ gpio_free(gpio + 5);
+ gpio_free(gpio + 6);
+ gpio_free(gpio + 7);
+
+ return 0;
+}
+
+static struct pca953x_platform_data da850_evm_ui_expander_info = {
+ .gpio_base = DAVINCI_N_GPIO,
+ .setup = da850_evm_ui_expander_setup,
+ .teardown = da850_evm_ui_expander_teardown,
+};
+
static struct i2c_board_info __initdata da850_evm_i2c_devices[] = {
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
- }
+ },
+ {
+ I2C_BOARD_INFO("tca6416", 0x20),
+ .platform_data = &da850_evm_ui_expander_info,
+ },
};
static struct davinci_i2c_platform_data da850_evm_i2c_0_pdata = {
@@ -158,11 +299,6 @@ static struct davinci_uart_config da850_evm_uart_config __initdata = {
.enabled_uarts = 0x7,
};
-static struct platform_device *da850_evm_devices[] __initdata = {
- &da850_evm_nandflash_device,
- &da850_evm_norflash_device,
-};
-
/* davinci da850 evm audio machine driver */
static u8 da850_iis_serializer_direction[] = {
INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE,
@@ -198,6 +334,8 @@ static struct davinci_mmc_config da850_mmc_config = {
.get_ro = da850_evm_mmc_get_ro,
.get_cd = da850_evm_mmc_get_cd,
.wires = 4,
+ .max_freq = 50000000,
+ .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_2,
};
@@ -233,56 +371,227 @@ static int da850_lcd_hw_init(void)
return 0;
}
-#define DA8XX_AEMIF_CE2CFG_OFFSET 0x10
-#define DA8XX_AEMIF_ASIZE_16BIT 0x1
+/* TPS65070 voltage regulator support */
-static void __init da850_evm_init_nor(void)
-{
- void __iomem *aemif_addr;
+/* 3.3V */
+struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
+ {
+ .supply = "usb0_vdda33",
+ },
+ {
+ .supply = "usb1_vdda33",
+ },
+};
- aemif_addr = ioremap(DA8XX_AEMIF_CTL_BASE, SZ_32K);
+/* 3.3V or 1.8V */
+struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
+ {
+ .supply = "dvdd3318_a",
+ },
+ {
+ .supply = "dvdd3318_b",
+ },
+ {
+ .supply = "dvdd3318_c",
+ },
+};
- /* Configure data bus width of CS2 to 16 bit */
- writel(readl(aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET) |
- DA8XX_AEMIF_ASIZE_16BIT,
- aemif_addr + DA8XX_AEMIF_CE2CFG_OFFSET);
+/* 1.2V */
+struct regulator_consumer_supply tps65070_dcdc3_consumers[] = {
+ {
+ .supply = "cvdd",
+ },
+};
- iounmap(aemif_addr);
-}
+/* 1.8V LDO */
+struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
+ {
+ .supply = "sata_vddr",
+ },
+ {
+ .supply = "usb0_vdda18",
+ },
+ {
+ .supply = "usb1_vdda18",
+ },
+ {
+ .supply = "ddr_dvdd18",
+ },
+};
-#if defined(CONFIG_MTD_PHYSMAP) || \
- defined(CONFIG_MTD_PHYSMAP_MODULE)
-#define HAS_NOR 1
-#else
-#define HAS_NOR 0
-#endif
+/* 1.2V LDO */
+struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
+ {
+ .supply = "sata_vdd",
+ },
+ {
+ .supply = "pll0_vdda",
+ },
+ {
+ .supply = "pll1_vdda",
+ },
+ {
+ .supply = "usbs_cvdd",
+ },
+ {
+ .supply = "vddarnwa1",
+ },
+};
-#if defined(CONFIG_MMC_DAVINCI) || \
- defined(CONFIG_MMC_DAVINCI_MODULE)
-#define HAS_MMC 1
-#else
-#define HAS_MMC 0
-#endif
+struct regulator_init_data tps65070_regulator_data[] = {
+ /* dcdc1 */
+ {
+ .constraints = {
+ .min_uV = 3150000,
+ .max_uV = 3450000,
+ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS),
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc1_consumers),
+ .consumer_supplies = tps65070_dcdc1_consumers,
+ },
-static __init void da850_evm_init(void)
+ /* dcdc2 */
+ {
+ .constraints = {
+ .min_uV = 1710000,
+ .max_uV = 3450000,
+ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS),
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers),
+ .consumer_supplies = tps65070_dcdc2_consumers,
+ },
+
+ /* dcdc3 */
+ {
+ .constraints = {
+ .min_uV = 950000,
+ .max_uV = 1320000,
+ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS),
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers),
+ .consumer_supplies = tps65070_dcdc3_consumers,
+ },
+
+ /* ldo1 */
+ {
+ .constraints = {
+ .min_uV = 1710000,
+ .max_uV = 1890000,
+ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS),
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65070_ldo1_consumers),
+ .consumer_supplies = tps65070_ldo1_consumers,
+ },
+
+ /* ldo2 */
+ {
+ .constraints = {
+ .min_uV = 1140000,
+ .max_uV = 1320000,
+ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS),
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(tps65070_ldo2_consumers),
+ .consumer_supplies = tps65070_ldo2_consumers,
+ },
+};
+
+static struct i2c_board_info __initdata da850evm_tps65070_info[] = {
+ {
+ I2C_BOARD_INFO("tps6507x", 0x48),
+ .platform_data = &tps65070_regulator_data[0],
+ },
+};
+
+static int __init pmic_tps65070_init(void)
{
- struct davinci_soc_info *soc_info = &davinci_soc_info;
+ return i2c_register_board_info(1, da850evm_tps65070_info,
+ ARRAY_SIZE(da850evm_tps65070_info));
+}
+
+static const short da850_evm_lcdc_pins[] = {
+ DA850_GPIO2_8, DA850_GPIO2_15,
+ -1
+};
+
+static int __init da850_evm_config_emac(void)
+{
+ void __iomem *cfg_chip3_base;
int ret;
+ u32 val;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+ u8 rmii_en = soc_info->emac_pdata->rmii_en;
+
+ if (!machine_is_davinci_da850_evm())
+ return 0;
+
+ cfg_chip3_base = DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG);
+
+ val = __raw_readl(cfg_chip3_base);
+
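+ /* CFGCHIP3 bit 8 selects RMII (set) or MII (clear) mode for the EMAC */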
+ if (rmii_en) {
+ val |= BIT(8);
+ ret = da8xx_pinmux_setup(da850_rmii_pins);
+ pr_info("EMAC: RMII PHY configured, MII PHY will not be"
+ " functional\n");
+ } else {
+ val &= ~BIT(8);
+ ret = da8xx_pinmux_setup(da850_cpgmac_pins);
+ pr_info("EMAC: MII PHY configured, RMII PHY will not be"
+ " functional\n");
+ }
- ret = da8xx_pinmux_setup(da850_nand_pins);
if (ret)
- pr_warning("da850_evm_init: nand mux setup failed: %d\n",
+ pr_warning("da850_evm_init: cpgmac/rmii mux setup failed: %d\n",
ret);
- ret = da8xx_pinmux_setup(da850_nor_pins);
+ /* configure the CFGCHIP3 register for RMII or MII */
+ __raw_writel(val, cfg_chip3_base);
+
+ ret = davinci_cfg_reg(DA850_GPIO2_6);
if (ret)
- pr_warning("da850_evm_init: nor mux setup failed: %d\n",
+ pr_warning("da850_evm_init:GPIO(2,6) mux setup "
+ "failed\n");
+
+ ret = gpio_request(DA850_MII_MDIO_CLKEN_PIN, "mdio_clk_en");
+ if (ret) {
+ pr_warning("Cannot open GPIO %d\n",
+ DA850_MII_MDIO_CLKEN_PIN);
+ return ret;
+ }
+
+ /* Enable/Disable MII MDIO clock */
+ gpio_direction_output(DA850_MII_MDIO_CLKEN_PIN, rmii_en);
+
+ soc_info->emac_pdata->phy_mask = DA850_EVM_PHY_MASK;
+ soc_info->emac_pdata->mdio_max_freq = DA850_EVM_MDIO_FREQUENCY;
+
+ ret = da8xx_register_emac();
+ if (ret)
+ pr_warning("da850_evm_init: emac registration failed: %d\n",
ret);
- da850_evm_init_nor();
+ return 0;
+}
+device_initcall(da850_evm_config_emac);
+
+static __init void da850_evm_init(void)
+{
+ int ret;
- platform_add_devices(da850_evm_devices,
- ARRAY_SIZE(da850_evm_devices));
+ ret = pmic_tps65070_init();
+ if (ret)
+ pr_warning("da850_evm_init: TPS65070 PMIC init failed: %d\n",
+ ret);
ret = da8xx_register_edma();
if (ret)
@@ -299,19 +608,6 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: i2c0 registration failed: %d\n",
ret);
- soc_info->emac_pdata->phy_mask = DA850_EVM_PHY_MASK;
- soc_info->emac_pdata->mdio_max_freq = DA850_EVM_MDIO_FREQUENCY;
- soc_info->emac_pdata->rmii_en = 0;
-
- ret = da8xx_pinmux_setup(da850_cpgmac_pins);
- if (ret)
- pr_warning("da850_evm_init: cpgmac mux setup failed: %d\n",
- ret);
-
- ret = da8xx_register_emac();
- if (ret)
- pr_warning("da850_evm_init: emac registration failed: %d\n",
- ret);
ret = da8xx_register_watchdog();
if (ret)
@@ -319,11 +615,6 @@ static __init void da850_evm_init(void)
ret);
if (HAS_MMC) {
- if (HAS_NOR)
- pr_warning("WARNING: both NOR Flash and MMC/SD are "
- "enabled, but they share AEMIF pins.\n"
- "\tDisable one of them.\n");
-
ret = da8xx_pinmux_setup(da850_mmcsd0_pins);
if (ret)
pr_warning("da850_evm_init: mmcsd0 mux setup failed:"
@@ -365,22 +656,42 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: mcasp mux setup failed: %d\n",
ret);
- da8xx_init_mcasp(0, &da850_evm_snd_data);
+ da8xx_register_mcasp(0, &da850_evm_snd_data);
ret = da8xx_pinmux_setup(da850_lcdcntl_pins);
if (ret)
pr_warning("da850_evm_init: lcdcntl mux setup failed: %d\n",
ret);
+ /* Handle board specific muxing for LCD here */
+ ret = da8xx_pinmux_setup(da850_evm_lcdc_pins);
+ if (ret)
+ pr_warning("da850_evm_init: evm specific lcd mux setup "
+ "failed: %d\n", ret);
+
ret = da850_lcd_hw_init();
if (ret)
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
- ret = da8xx_register_lcdc();
+ ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",
ret);
+
+ ret = da8xx_register_rtc();
+ if (ret)
+ pr_warning("da850_evm_init: rtc setup failed: %d\n", ret);
+
+ ret = da850_register_cpufreq();
+ if (ret)
+ pr_warning("da850_evm_init: cpufreq registration failed: %d\n",
+ ret);
+
+ ret = da8xx_register_cpuidle();
+ if (ret)
+ pr_warning("da850_evm_init: cpuidle registration failed: %d\n",
+ ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 77e80679882..a9b650dcc17 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -9,15 +9,13 @@
* or implied.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
-#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/videodev2.h>
@@ -25,20 +23,15 @@
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
-#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-#include <mach/hardware.h>
#include <mach/dm355.h>
-#include <mach/psc.h>
-#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/nand.h>
#include <mach/mmc.h>
+#include <mach/usb.h>
#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
@@ -86,8 +79,9 @@ static struct davinci_nand_pdata davinci_nand_data = {
.mask_chipsel = BIT(14),
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
- .ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT,
+ .ecc_bits = 4,
};
static struct resource davinci_nand_resources[] = {
@@ -344,7 +338,7 @@ static __init void dm355_evm_init(void)
gpio_request(2, "usb_id_toggle");
gpio_direction_output(2, USB_ID_VALUE);
/* irlml6401 switches over 1A in under 8 msec */
- setup_usb(500, 8);
+ davinci_setup_usb(1000, 8);
davinci_setup_mmc(0, &dm355evm_mmc_config);
davinci_setup_mmc(1, &dm355evm_mmc_config);
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 84ad5d161a8..21f32eb41e8 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -8,34 +8,27 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
-#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
-#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
-#include <mach/hardware.h>
#include <mach/dm355.h>
-#include <mach/psc.h>
-#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/nand.h>
#include <mach/mmc.h>
+#include <mach/usb.h>
#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
@@ -270,7 +263,7 @@ static __init void dm355_leopard_init(void)
gpio_request(2, "usb_id_toggle");
gpio_direction_output(2, USB_ID_VALUE);
/* irlml6401 switches over 1A in under 8 msec */
- setup_usb(500, 8);
+ davinci_setup_usb(1000, 8);
davinci_setup_mmc(0, &dm355leopard_mmc_config);
davinci_setup_mmc(1, &dm355leopard_mmc_config);
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 52dd8046b30..289fe1b7d25 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -13,9 +13,8 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/clk.h>
@@ -24,20 +23,19 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
-#include <asm/setup.h>
+#include <linux/input.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
+
#include <mach/mux.h>
-#include <mach/hardware.h>
#include <mach/dm365.h>
-#include <mach/psc.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mmc.h>
#include <mach/nand.h>
-
+#include <mach/keyscan.h>
static inline int have_imager(void)
{
@@ -144,6 +142,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT,
+ .ecc_bits = 4,
};
static struct resource davinci_nand_resources[] = {
@@ -176,11 +175,16 @@ static struct at24_platform_data eeprom_info = {
.context = (void *)0x7f00,
};
+static struct snd_platform_data dm365_evm_snd_data;
+
static struct i2c_board_info i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
.platform_data = &eeprom_info,
},
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ },
};
static struct davinci_i2c_platform_data i2c_pdata = {
@@ -188,6 +192,38 @@ static struct davinci_i2c_platform_data i2c_pdata = {
.bus_delay = 0 /* usec */,
};
+#ifdef CONFIG_KEYBOARD_DAVINCI
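+/* 16 keys for the EVM's 4x4 keypad matrix; the keymap is terminated by 0 */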
+static unsigned short dm365evm_keymap[] = {
+ KEY_KP2,
+ KEY_LEFT,
+ KEY_EXIT,
+ KEY_DOWN,
+ KEY_ENTER,
+ KEY_UP,
+ KEY_KP1,
+ KEY_RIGHT,
+ KEY_MENU,
+ KEY_RECORD,
+ KEY_REWIND,
+ KEY_KPMINUS,
+ KEY_STOP,
+ KEY_FASTFORWARD,
+ KEY_KPPLUS,
+ KEY_PLAYPAUSE,
+ 0
+};
+
+static struct davinci_ks_platform_data dm365evm_ks_data = {
+ .keymap = dm365evm_keymap,
+ .keymapsize = ARRAY_SIZE(dm365evm_keymap),
+ .rep = 1,
+ /* Scan period = strobe + interval */
+ .strobe = 0x5,
+ .interval = 0x2,
+ .matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4,
+};
+#endif
+
static int cpld_mmc_get_cd(int module)
{
if (!cpld)
@@ -472,6 +508,13 @@ static __init void dm365_evm_init(void)
/* maybe setup mmc1/etc ... _after_ mmc0 */
evm_init_cpld();
+
+ dm365_init_asp(&dm365_evm_snd_data);
+ dm365_init_rtc();
+
+#ifdef CONFIG_KEYBOARD_DAVINCI
+ dm365_init_ks(&dm365evm_ks_data);
+#endif
}
static __init void dm365_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 1213a0087ad..fd0398bc6db 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -9,45 +9,34 @@
* or implied.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/memory.h>
-
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
#include <linux/i2c/at24.h>
-#include <linux/etherdevice.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
-#include <linux/io.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/videodev2.h>
#include <media/tvp514x.h>
-#include <asm/setup.h>
#include <asm/mach-types.h>
-
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <mach/dm644x.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mux.h>
-#include <mach/psc.h>
#include <mach/nand.h>
#include <mach/mmc.h>
-#include <mach/emac.h>
+#include <mach/usb.h>
#define DM644X_EVM_PHY_MASK (0x2)
#define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
@@ -477,7 +466,7 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c)
/* irlml6401 switches over 1A, in under 8 msec;
* now it can be managed by nDRV_VBUS ...
*/
- setup_usb(500, 8);
+ davinci_setup_usb(1000, 8);
return 0;
}
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 24e0e13b149..8d0b0e01c59 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -17,38 +17,28 @@
**************************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <linux/root_dev.h>
-#include <linux/dma-mapping.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
#include <linux/leds.h>
#include <linux/gpio.h>
-#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/i2c/pcf857x.h>
-#include <linux/etherdevice.h>
#include <media/tvp514x.h>
-#include <asm/setup.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <mach/dm646x.h>
#include <mach/common.h>
-#include <mach/psc.h>
#include <mach/serial.h>
#include <mach/i2c.h>
-#include <mach/mmc.h>
-#include <mach/emac.h>
+#include <mach/nand.h>
#if defined(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
defined(CONFIG_BLK_DEV_PALMCHIP_BK3710_MODULE)
@@ -57,6 +47,11 @@
#define HAS_ATA 0
#endif
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x20008000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x42000000
+
+#define NAND_BLOCK_SIZE SZ_128K
+
/* CPLD Register 0 bits to control ATA */
#define DM646X_EVM_ATA_RST BIT(0)
#define DM646X_EVM_ATA_PWD BIT(1)
@@ -92,6 +87,63 @@ static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
+/* Note: The first partition is set up as 'bootloader', covering UBL, U-Boot
+ * and the U-Boot environment. This avoids a dependency on any particular
+ * combination of UBL, U-Boot or flashing tools etc.
+ */
+static struct mtd_partition davinci_nand_partitions[] = {
+ {
+ /* UBL, U-Boot with environment */
+ .name = "bootloader",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 16 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_4M,
+ .mask_flags = 0,
+ }, {
+ .name = "filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ }
+};
+
+static struct davinci_nand_pdata davinci_nand_data = {
+ .mask_cle = 0x80000,
+ .mask_ale = 0x40000,
+ .parts = davinci_nand_partitions,
+ .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
+ .ecc_mode = NAND_ECC_HW,
+ .options = 0,
+};
+
+static struct resource davinci_nand_resources[] = {
+ {
+ .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
+ .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device davinci_nand_device = {
+ .name = "davinci_nand",
+ .id = 0,
+
+ .num_resources = ARRAY_SIZE(davinci_nand_resources),
+ .resource = davinci_nand_resources,
+
+ .dev = {
+ .platform_data = &davinci_nand_data,
+ },
+};
+
/* CPLD Register 0 Client: used for I/O Control */
static int cpld_reg0_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -142,7 +194,7 @@ static struct gpio_led evm_leds[] = {
{ .name = "DS4", .active_low = 1, },
};
-static __initconst struct gpio_led_platform_data evm_led_data = {
+static const struct gpio_led_platform_data evm_led_data = {
.num_leds = ARRAY_SIZE(evm_leds),
.leds = evm_leds,
};
@@ -647,6 +699,8 @@ static __init void evm_init(void)
dm646x_init_mcasp0(&dm646x_evm_snd_data[0]);
dm646x_init_mcasp1(&dm646x_evm_snd_data[1]);
+ platform_device_register(&davinci_nand_device);
+
if (HAS_ATA)
dm646x_init_ide();
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
new file mode 100644
index 00000000000..bd9ca079b69
--- /dev/null
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -0,0 +1,323 @@
+/*
+ * Neuros Technologies OSD2 board support
+ *
+ * Modified from original 644X-EVM board support.
+ * 2008 (c) Neuros Technology, LLC.
+ * 2009 (c) Jorge Luis Zapata Muga <jorgeluis.zapata@gmail.com>
+ * 2009 (c) Andrey A. Porodko <Andrey.Porodko@gmail.com>
+ *
+ * The Neuros OSD 2.0 is the hardware component of the Neuros Open
+ * Internet Television Platform. The hardware is very close to the TI
+ * DM644X-EVM board. It has:
+ * a DM6446M02 module with 256MB NAND, 256MB RAM, TLV320AIC32 AIC,
+ * USB, Ethernet, SD/MMC, UART, and THS8200 and TVP7000 for video.
+ * Additionally there is a realtime clock, an IR remote control
+ * receiver, an IR blaster based on an MSP430 (whose firmware differs
+ * from the one used on the DM644X-EVM), an internal ATA-6 3.5” HDD
+ * with a PATA interface, and two muxed red-green LEDs.
+ *
+ * For more information please refer to
+ * http://wiki.neurostechnology.com/index.php/OSD_2.0_HD
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/dm644x.h>
+#include <mach/i2c.h>
+#include <mach/serial.h>
+#include <mach/mux.h>
+#include <mach/nand.h>
+#include <mach/mmc.h>
+#include <mach/usb.h>
+
+#define NEUROS_OSD2_PHY_MASK 0x2
+#define NEUROS_OSD2_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
+
+#define DAVINCI_CFC_ATA_BASE 0x01C66000
+
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+
+#define LXT971_PHY_ID 0x001378e2
+#define LXT971_PHY_MASK 0xfffffff0
+
+#define NTOSD2_AUDIOSOC_I2C_ADDR 0x18
+#define NTOSD2_MSP430_I2C_ADDR 0x59
+#define NTOSD2_MSP430_IRQ 2
+
+/* Neuros OSD2 has a Samsung 256 MByte NAND flash (Dev ID of 0xAA;
+ * 2048 blocks in the device, 64 pages per block, 2048 bytes per
+ * page).
+ */
+
+#define NAND_BLOCK_SIZE SZ_128K
+
+struct mtd_partition davinci_ntosd2_nandflash_partition[] = {
+ {
+ /* UBL (a few copies) plus U-Boot */
+ .name = "bootloader",
+ .offset = 0,
+ .size = 15 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ /* U-Boot environment */
+ .name = "params",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 1 * NAND_BLOCK_SIZE,
+ .mask_flags = 0,
+ }, {
+ /* Kernel */
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_4M,
+ .mask_flags = 0,
+ }, {
+ /* File System */
+ .name = "filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ }
+ /* A few blocks at end hold a flash Bad Block Table. */
+};
+
+static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
+ .parts = davinci_ntosd2_nandflash_partition,
+ .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
+ .ecc_mode = NAND_ECC_HW,
+ .options = NAND_USE_FLASH_BBT,
+};
+
+static struct resource davinci_ntosd2_nandflash_resource[] = {
+ {
+ .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
+ .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device davinci_ntosd2_nandflash_device = {
+ .name = "davinci_nand",
+ .id = 0,
+ .dev = {
+ .platform_data = &davinci_ntosd2_nandflash_data,
+ },
+ .num_resources = ARRAY_SIZE(davinci_ntosd2_nandflash_resource),
+ .resource = davinci_ntosd2_nandflash_resource,
+};
+
+static u64 davinci_fb_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device davinci_fb_device = {
+ .name = "davincifb",
+ .id = -1,
+ .dev = {
+ .dma_mask = &davinci_fb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = 0,
+};
+
+static struct resource ide_resources[] = {
+ {
+ .start = DAVINCI_CFC_ATA_BASE,
+ .end = DAVINCI_CFC_ATA_BASE + 0x7ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_IDE,
+ .end = IRQ_IDE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 ide_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device ide_dev = {
+ .name = "palm_bk3710",
+ .id = -1,
+ .resource = ide_resources,
+ .num_resources = ARRAY_SIZE(ide_resources),
+ .dev = {
+ .dma_mask = &ide_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct snd_platform_data dm644x_ntosd2_snd_data;
+
+static struct gpio_led ntosd2_leds[] = {
+ { .name = "led1_green", .gpio = GPIO(10), },
+ { .name = "led1_red", .gpio = GPIO(11), },
+ { .name = "led2_green", .gpio = GPIO(12), },
+ { .name = "led2_red", .gpio = GPIO(13), },
+};
+
+static struct gpio_led_platform_data ntosd2_leds_data = {
+ .num_leds = ARRAY_SIZE(ntosd2_leds),
+ .leds = ntosd2_leds,
+};
+
+static struct platform_device ntosd2_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &ntosd2_leds_data,
+ },
+};
+
+
+static struct platform_device *davinci_ntosd2_devices[] __initdata = {
+ &davinci_fb_device,
+ &ntosd2_leds_dev,
+};
+
+static struct davinci_uart_config uart_config __initdata = {
+ .enabled_uarts = (1 << 0),
+};
+
+static void __init davinci_ntosd2_map_io(void)
+{
+ dm644x_init();
+}
+
+/* I2C initialization */
+static struct davinci_i2c_platform_data ntosd2_i2c_pdata = {
+ .bus_freq = 20 /* kHz */,
+ .bus_delay = 100 /* usec */,
+};
+
+static struct i2c_board_info __initdata ntosd2_i2c_info[] = {
+};
+
+static int ntosd2_init_i2c(void)
+{
+ int status;
+
+ davinci_init_i2c(&ntosd2_i2c_pdata);
+ status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type);
+ if (status == 0) {
+ status = gpio_direction_input(NTOSD2_MSP430_IRQ);
+ if (status == 0) {
+ status = gpio_to_irq(NTOSD2_MSP430_IRQ);
+ if (status > 0) {
+ ntosd2_i2c_info[0].irq = status;
+ i2c_register_board_info(1,
+ ntosd2_i2c_info,
+ ARRAY_SIZE(ntosd2_i2c_info));
+ }
+ }
+ }
+ return status;
+}
+
+static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
+ .wires = 4,
+ .version = MMC_CTLR_VERSION_1
+};
+
+
+#if defined(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
+ defined(CONFIG_BLK_DEV_PALMCHIP_BK3710_MODULE)
+#define HAS_ATA 1
+#else
+#define HAS_ATA 0
+#endif
+
+#if defined(CONFIG_MTD_NAND_DAVINCI) || \
+ defined(CONFIG_MTD_NAND_DAVINCI_MODULE)
+#define HAS_NAND 1
+#else
+#define HAS_NAND 0
+#endif
+
+static __init void davinci_ntosd2_init(void)
+{
+ struct clk *aemif_clk;
+ struct davinci_soc_info *soc_info = &davinci_soc_info;
+ int status;
+
+ aemif_clk = clk_get(NULL, "aemif");
+ clk_enable(aemif_clk);
+
+ if (HAS_ATA) {
+ if (HAS_NAND)
+ pr_warning("WARNING: both IDE and Flash are "
+ "enabled, but they share AEMIF pins.\n"
+ "\tDisable IDE for NAND/NOR support.\n");
+ davinci_cfg_reg(DM644X_HPIEN_DISABLE);
+ davinci_cfg_reg(DM644X_ATAEN);
+ davinci_cfg_reg(DM644X_HDIREN);
+ platform_device_register(&ide_dev);
+ } else if (HAS_NAND) {
+ davinci_cfg_reg(DM644X_HPIEN_DISABLE);
+ davinci_cfg_reg(DM644X_ATAEN_DISABLE);
+
+ /* only one device will be jumpered and detected */
+ if (HAS_NAND)
+ platform_device_register(
+ &davinci_ntosd2_nandflash_device);
+ }
+
+ platform_add_devices(davinci_ntosd2_devices,
+ ARRAY_SIZE(davinci_ntosd2_devices));
+
+ /* Initialize I2C interface specific for this board */
+ status = ntosd2_init_i2c();
+ if (status < 0)
+ pr_warning("davinci_ntosd2_init: msp430 irq setup failed:"
+ " %d\n", status);
+
+ davinci_serial_init(&uart_config);
+ dm644x_init_asp(&dm644x_ntosd2_snd_data);
+
+ soc_info->emac_pdata->phy_mask = NEUROS_OSD2_PHY_MASK;
+ soc_info->emac_pdata->mdio_max_freq = NEUROS_OSD2_MDIO_FREQUENCY;
+
+ davinci_setup_usb(1000, 8);
+ /*
+ * Mux the pins to be GPIOs; VLYNQEN is already done at startup.
+ * The AEAWx are five AEAW pins that can be muxed separately and act
+ * as a bitmask for GPIO management. According to the TI documentation
+ * (http://www.ti.com/lit/gpn/tms320dm6446), to employ GPIO(10,11,12,13)
+ * for the LEDs any combination of bits works except the last four,
+ * so all five are reset here.
+ */
+ davinci_cfg_reg(DM644X_AEAW0);
+ davinci_cfg_reg(DM644X_AEAW1);
+ davinci_cfg_reg(DM644X_AEAW2);
+ davinci_cfg_reg(DM644X_AEAW3);
+ davinci_cfg_reg(DM644X_AEAW4);
+
+ davinci_setup_mmc(0, &davinci_ntosd2_mmc_config);
+}
+
+static __init void davinci_ntosd2_irq_init(void)
+{
+ davinci_irq_init();
+}
+
+MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
+ /* Maintainer: Neuros Technologies <neuros@groups.google.com> */
+ .phys_io = IO_PHYS,
+ .io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+ .boot_params = (DAVINCI_DDR_BASE + 0x100),
+ .map_io = davinci_ntosd2_map_io,
+ .init_irq = davinci_ntosd2_irq_init,
+ .timer = &davinci_timer,
+ .init_machine = davinci_ntosd2_init,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 7acdfd8ac07..08d373bfcc8 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -23,35 +23,24 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
-#include <linux/etherdevice.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/io.h>
-#include <asm/setup.h>
#include <asm/mach-types.h>
-
#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <mach/dm644x.h>
#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
-#include <mach/psc.h>
#include <mach/mux.h>
+#include <mach/usb.h>
#define SFFSDR_PHY_MASK (0x2)
#define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
@@ -107,11 +96,6 @@ static struct platform_device davinci_sffsdr_nandflash_device = {
.resource = davinci_sffsdr_nandflash_resource,
};
-static struct emac_platform_data sffsdr_emac_pdata = {
- .phy_mask = SFFSDR_PHY_MASK,
- .mdio_max_freq = SFFSDR_MDIO_FREQUENCY,
-};
-
static struct at24_platform_data eeprom_info = {
.byte_len = (64*1024) / 8,
.page_size = 32,
@@ -164,7 +148,7 @@ static __init void davinci_sffsdr_init(void)
davinci_serial_init(&uart_config);
soc_info->emac_pdata->phy_mask = SFFSDR_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = SFFSDR_MDIO_FREQUENCY;
- setup_usb(0, 0); /* We support only peripheral mode. */
+ davinci_setup_usb(0, 0); /* We support only peripheral mode. */
/* mux VLYNQ pins */
davinci_cfg_reg(DM644X_VLYNQEN);
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 83d54d50b5e..baece65cb9c 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -17,8 +17,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <mach/hardware.h>
@@ -42,8 +42,7 @@ static void __clk_enable(struct clk *clk)
if (clk->parent)
__clk_enable(clk->parent);
if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->psc_ctlr,
- clk->lpsc, 1);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1);
}
static void __clk_disable(struct clk *clk)
@@ -51,8 +50,7 @@ static void __clk_disable(struct clk *clk)
if (WARN_ON(clk->usecount == 0))
return;
if (--clk->usecount == 0 && !(clk->flags & CLK_PLL))
- davinci_psc_config(psc_domain(clk), clk->psc_ctlr,
- clk->lpsc, 0);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
if (clk->parent)
__clk_disable(clk->parent);
}
@@ -99,20 +97,74 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
+ if (clk->round_rate)
+ return clk->round_rate(clk, rate);
+
return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);
+/* Propagate rate to children */
+static void propagate_rate(struct clk *root)
+{
+ struct clk *clk;
+
+ list_for_each_entry(clk, &root->children, childnode) {
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ }
+}
+
int clk_set_rate(struct clk *clk, unsigned long rate)
{
+ unsigned long flags;
+ int ret = -EINVAL;
+
if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
+ return ret;
- /* changing the clk rate is not supported */
- return -EINVAL;
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (clk->set_rate)
+ ret = clk->set_rate(clk, rate);
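+ /* On success, recompute this clock's rate and propagate it to the children */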
+ if (ret == 0) {
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ }
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
+ return ret;
}
EXPORT_SYMBOL(clk_set_rate);
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ /* Cannot change parent on enabled clock */
+ if (WARN_ON(clk->usecount))
+ return -EINVAL;
+
+ mutex_lock(&clocks_mutex);
+ clk->parent = parent;
+ list_del_init(&clk->childnode);
+ list_add(&clk->childnode, &clk->parent->children);
+ mutex_unlock(&clocks_mutex);
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
int clk_register(struct clk *clk)
{
if (clk == NULL || IS_ERR(clk))
@@ -123,16 +175,24 @@ int clk_register(struct clk *clk)
clk->name, clk->parent->name))
return -EINVAL;
+ INIT_LIST_HEAD(&clk->children);
+
mutex_lock(&clocks_mutex);
list_add_tail(&clk->node, &clocks);
+ if (clk->parent)
+ list_add_tail(&clk->childnode, &clk->parent->children);
mutex_unlock(&clocks_mutex);
/* If rate is already set, use it */
if (clk->rate)
return 0;
+ /* Else, see if there is a way to calculate it */
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+
/* Otherwise, default to parent rate */
- if (clk->parent)
+ else if (clk->parent)
clk->rate = clk->parent->rate;
return 0;
@@ -146,6 +206,7 @@ void clk_unregister(struct clk *clk)
mutex_lock(&clocks_mutex);
list_del(&clk->node);
+ list_del(&clk->childnode);
mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
@@ -166,11 +227,11 @@ static int __init clk_disable_unused(void)
continue;
/* ignore if in Disabled or SwRstDisable states */
- if (!davinci_psc_is_clk_active(ck->psc_ctlr, ck->lpsc))
+ if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
continue;
pr_info("Clocks: disable unused %s\n", ck->name);
- davinci_psc_config(psc_domain(ck), ck->psc_ctlr, ck->lpsc, 0);
+ davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0);
}
spin_unlock_irq(&clockfw_lock);
@@ -179,50 +240,62 @@ static int __init clk_disable_unused(void)
late_initcall(clk_disable_unused);
#endif
-static void clk_sysclk_recalc(struct clk *clk)
+static unsigned long clk_sysclk_recalc(struct clk *clk)
{
u32 v, plldiv;
struct pll_data *pll;
+ unsigned long rate = clk->rate;
/* If this is the PLL base clock, no more calculations needed */
if (clk->pll_data)
- return;
+ return rate;
if (WARN_ON(!clk->parent))
- return;
+ return rate;
- clk->rate = clk->parent->rate;
+ rate = clk->parent->rate;
/* Otherwise, the parent must be a PLL */
if (WARN_ON(!clk->parent->pll_data))
- return;
+ return rate;
pll = clk->parent->pll_data;
/* If pre-PLL, source clock is before the multiplier and divider(s) */
if (clk->flags & PRE_PLL)
- clk->rate = pll->input_rate;
+ rate = pll->input_rate;
if (!clk->div_reg)
- return;
+ return rate;
v = __raw_readl(pll->base + clk->div_reg);
if (v & PLLDIV_EN) {
plldiv = (v & PLLDIV_RATIO_MASK) + 1;
if (plldiv)
- clk->rate /= plldiv;
+ rate /= plldiv;
}
+
+ return rate;
+}
+
+static unsigned long clk_leafclk_recalc(struct clk *clk)
+{
+ if (WARN_ON(!clk->parent))
+ return clk->rate;
+
+ return clk->parent->rate;
}
-static void __init clk_pll_init(struct clk *clk)
+static unsigned long clk_pllclk_recalc(struct clk *clk)
{
u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
u8 bypass;
struct pll_data *pll = clk->pll_data;
+ unsigned long rate = clk->rate;
pll->base = IO_ADDRESS(pll->phys_base);
ctrl = __raw_readl(pll->base + PLLCTL);
- clk->rate = pll->input_rate = clk->parent->rate;
+ rate = pll->input_rate = clk->parent->rate;
if (ctrl & PLLCTL_PLLEN) {
bypass = 0;
@@ -255,9 +328,9 @@ static void __init clk_pll_init(struct clk *clk)
}
if (!bypass) {
- clk->rate /= prediv;
- clk->rate *= mult;
- clk->rate /= postdiv;
+ rate /= prediv;
+ rate *= mult;
+ rate /= postdiv;
}
pr_debug("PLL%d: input = %lu MHz [ ",
@@ -270,8 +343,90 @@ static void __init clk_pll_init(struct clk *clk)
pr_debug("* %d ", mult);
if (postdiv > 1)
pr_debug("/ %d ", postdiv);
- pr_debug("] --> %lu MHz output.\n", clk->rate / 1000000);
+ pr_debug("] --> %lu MHz output.\n", rate / 1000000);
+
+ return rate;
+}
+
+/**
+ * davinci_set_pllrate - set the output rate of a given PLL.
+ *
+ * Note: Currently tested to work with OMAP-L138 only.
+ *
+ * @pll: pll whose rate needs to be changed.
+ * @prediv: The pre-divider value. Passing 0 disables the pre-divider.
+ * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
+ * @postdiv: The post divider value. Passing 0 disables the post-divider.
+ */
+int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
+ unsigned int mult, unsigned int postdiv)
+{
+ u32 ctrl;
+ unsigned int locktime;
+
+ if (pll->base == NULL)
+ return -EINVAL;
+
+ /*
+ * The PLL lock time required per the OMAP-L138 datasheet is
+ * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
+ * as 4 and the OSCIN rate as 25 MHz.
+ */
+ if (prediv) {
+ locktime = ((2000 * prediv) / 100);
+ prediv = (prediv - 1) | PLLDIV_EN;
+ } else {
+ locktime = 20;
+ }
+ if (postdiv)
+ postdiv = (postdiv - 1) | PLLDIV_EN;
+ if (mult)
+ mult = mult - 1;
+
+ ctrl = __raw_readl(pll->base + PLLCTL);
+
+ /* Switch the PLL to bypass mode */
+ ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
+ __raw_writel(ctrl, pll->base + PLLCTL);
+
+ /*
+ * Wait for 4 OSCIN/CLKIN cycles to ensure that the PLLC has switched
+ * to bypass mode. Delay of 1us ensures we are good for all > 4MHz
+ * OSCIN/CLKIN inputs. Typically the input is ~25MHz.
+ */
+ udelay(1);
+
+ /* Reset and enable PLL */
+ ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
+ __raw_writel(ctrl, pll->base + PLLCTL);
+
+ if (pll->flags & PLL_HAS_PREDIV)
+ __raw_writel(prediv, pll->base + PREDIV);
+
+ __raw_writel(mult, pll->base + PLLM);
+
+ if (pll->flags & PLL_HAS_POSTDIV)
+ __raw_writel(postdiv, pll->base + POSTDIV);
+
+ /*
+ * Wait for PLL to reset properly, OMAP-L138 datasheet says
+ * 'min' time = 125ns
+ */
+ udelay(1);
+
+ /* Bring PLL out of reset */
+ ctrl |= PLLCTL_PLLRST;
+ __raw_writel(ctrl, pll->base + PLLCTL);
+
+ udelay(locktime);
+
+ /* Remove PLL from bypass mode */
+ ctrl |= PLLCTL_PLLEN;
+ __raw_writel(ctrl, pll->base + PLLCTL);
+
+ return 0;
}
+EXPORT_SYMBOL(davinci_set_pllrate);
int __init davinci_clk_init(struct davinci_clk *clocks)
{
@@ -281,12 +436,23 @@ int __init davinci_clk_init(struct davinci_clk *clocks)
for (c = clocks; c->lk.clk; c++) {
clk = c->lk.clk;
- if (clk->pll_data)
- clk_pll_init(clk);
+ if (!clk->recalc) {
+
+ /* Check if clock is a PLL */
+ if (clk->pll_data)
+ clk->recalc = clk_pllclk_recalc;
+
+ /* Else, if it is a PLL-derived clock */
+ else if (clk->flags & CLK_PLL)
+ clk->recalc = clk_sysclk_recalc;
+
+ /* Otherwise, it is a leaf clock (PSC clock) */
+ else if (clk->parent)
+ clk->recalc = clk_leafclk_recalc;
+ }
- /* Calculate rates for PLL-derived clocks */
- else if (clk->flags & CLK_PLL)
- clk_sysclk_recalc(clk);
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
if (clk->lpsc)
clk->flags |= CLK_PSC;
@@ -352,9 +518,8 @@ dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
/* REVISIT show device associations too */
/* cost is now small, but not linear... */
- list_for_each_entry(clk, &clocks, node) {
- if (clk->parent == parent)
- dump_clock(s, nest + NEST_DELTA, clk);
+ list_for_each_entry(clk, &parent->children, childnode) {
+ dump_clock(s, nest + NEST_DELTA, clk);
}
}
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 27233cb4a2f..c92d77a3008 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -22,6 +22,10 @@
/* PLL/Reset register offsets */
#define PLLCTL 0x100
#define PLLCTL_PLLEN BIT(0)
+#define PLLCTL_PLLPWRDN BIT(1)
+#define PLLCTL_PLLRST BIT(3)
+#define PLLCTL_PLLDIS BIT(4)
+#define PLLCTL_PLLENSRC BIT(5)
#define PLLCTL_CLKMODE BIT(8)
#define PLLM 0x110
@@ -65,15 +69,20 @@ struct clk {
const char *name;
unsigned long rate;
u8 usecount;
- u8 flags;
u8 lpsc;
- u8 psc_ctlr;
+ u8 gpsc;
+ u32 flags;
struct clk *parent;
+ struct list_head children; /* list of children */
+ struct list_head childnode; /* parent's child list node */
struct pll_data *pll_data;
u32 div_reg;
+ unsigned long (*recalc) (struct clk *);
+ int (*set_rate) (struct clk *clk, unsigned long rate);
+ int (*round_rate) (struct clk *clk, unsigned long rate);
};
-/* Clock flags */
+/* Clock flags: SoC-specific flags start at BIT(16) */
#define ALWAYS_ENABLED BIT(1)
#define CLK_PSC BIT(2)
#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
@@ -94,6 +103,8 @@ struct davinci_clk {
}
int davinci_clk_init(struct davinci_clk *clocks);
+int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
+ unsigned int mult, unsigned int postdiv);
extern struct platform_device davinci_wdt_device;
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 61ede19c6b5..c2de94cde56 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -86,6 +86,8 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
dip = davinci_get_id(davinci_soc_info.jtag_id);
if (!dip) {
ret = -EINVAL;
+ pr_err("Unknown DaVinci JTAG ID 0x%x\n",
+ davinci_soc_info.jtag_id);
goto err;
}
@@ -104,5 +106,5 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
return;
err:
- pr_err("davinci_common_init: SoC Initialization failed\n");
+ panic("davinci_common_init: SoC Initialization failed\n");
}
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 96c8e97a7de..52b287cf3a4 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -10,9 +10,6 @@
*/
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
new file mode 100644
index 00000000000..d3fa6de1e20
--- /dev/null
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -0,0 +1,226 @@
+/*
+ * CPU frequency scaling for DaVinci
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Updated to support OMAP3
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <mach/hardware.h>
+#include <mach/cpufreq.h>
+#include <mach/common.h>
+
+#include "clock.h"
+
+struct davinci_cpufreq {
+ struct device *dev;
+ struct clk *armclk;
+};
+static struct davinci_cpufreq cpufreq;
+
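+/*
+ * Without a frequency table, clamp the policy limits to rates the ARM
+ * clock can actually provide (via clk_round_rate); with a table, defer
+ * to the generic table verification.
+ */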
+static int davinci_verify_speed(struct cpufreq_policy *policy)
+{
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+ struct clk *armclk = cpufreq.armclk;
+
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
+ policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static unsigned int davinci_getspeed(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+
+ return clk_get_rate(cpufreq.armclk) / 1000;
+}
+
+static int davinci_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ int ret = 0;
+ unsigned int idx;
+ struct cpufreq_freqs freqs;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct clk *armclk = cpufreq.armclk;
+
+ /*
+ * Ensure the desired rate is within the allowed range. Some governors
+ * (ondemand) will just pass target_freq=0 to get the minimum.
+ */
+ if (target_freq < policy->cpuinfo.min_freq)
+ target_freq = policy->cpuinfo.min_freq;
+ if (target_freq > policy->cpuinfo.max_freq)
+ target_freq = policy->cpuinfo.max_freq;
+
+ freqs.old = davinci_getspeed(0);
+ freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
+ freqs.cpu = 0;
+
+ if (freqs.old == freqs.new)
+ return ret;
+
+ cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER,
+ dev_driver_string(cpufreq.dev),
+ "transition: %u --> %u\n", freqs.old, freqs.new);
+
+ ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
+ freqs.new, relation, &idx);
+ if (ret)
+ return -EINVAL;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* if moving to higher frequency, up the voltage beforehand */
+ if (pdata->set_voltage && freqs.new > freqs.old)
+ pdata->set_voltage(idx);
+
+ ret = clk_set_rate(armclk, idx);
+
+ /* if moving to lower freq, lower the voltage after lowering freq */
+ if (pdata->set_voltage && freqs.new < freqs.old)
+ pdata->set_voltage(idx);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /* Finish platform specific initialization */
+ if (pdata->init) {
+ result = pdata->init();
+ if (result)
+ return result;
+ }
+
+ policy->cur = policy->min = policy->max = davinci_getspeed(0);
+
+ if (freq_table) {
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!result)
+ cpufreq_frequency_table_get_attr(freq_table,
+ policy->cpu);
+ } else {
+ policy->cpuinfo.min_freq = policy->min;
+ policy->cpuinfo.max_freq = policy->max;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = davinci_getspeed(0);
+
+ /*
+ * Time measurement across the target() function yields ~1500-1800 us
+ * with no drivers on the notification list.
+ * The latency is set to 2000 us to accommodate the addition of drivers
+ * to the pre/post change notification list.
+ */
+ policy->cpuinfo.transition_latency = 2000 * 1000;
+ return 0;
+}
+
+static int davinci_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *davinci_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver davinci_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = davinci_verify_speed,
+ .target = davinci_target,
+ .get = davinci_getspeed,
+ .init = davinci_cpu_init,
+ .exit = davinci_cpu_exit,
+ .name = "davinci",
+ .attr = davinci_cpufreq_attr,
+};
+
+static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+{
+ struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+ if (!pdata->freq_table)
+ return -EINVAL;
+
+ cpufreq.dev = &pdev->dev;
+
+ cpufreq.armclk = clk_get(NULL, "arm");
+ if (IS_ERR(cpufreq.armclk)) {
+ dev_err(cpufreq.dev, "Unable to get ARM clock\n");
+ return PTR_ERR(cpufreq.armclk);
+ }
+
+ return cpufreq_register_driver(&davinci_driver);
+}
+
+static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+{
+ clk_put(cpufreq.armclk);
+
+ return cpufreq_unregister_driver(&davinci_driver);
+}
+
+static struct platform_driver davinci_cpufreq_driver = {
+ .driver = {
+ .name = "cpufreq-davinci",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(davinci_cpufreq_remove),
+};
+
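+/*
+ * The matching platform device is registered elsewhere (e.g. the DA850
+ * EVM board init calls da850_register_cpufreq()); simply bind to it
+ * late in boot.
+ */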
+static int __init davinci_cpufreq_init(void)
+{
+ return platform_driver_probe(&davinci_cpufreq_driver,
+ davinci_cpufreq_probe);
+}
+late_initcall(davinci_cpufreq_init);
+
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
new file mode 100644
index 00000000000..97a90f36fc9
--- /dev/null
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -0,0 +1,197 @@
+/*
+ * CPU idle for DaVinci SoCs
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Derived from Marvell Kirkwood CPU idle code
+ * (arch/arm/mach-kirkwood/cpuidle.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#include <mach/cpuidle.h>
+
+#define DAVINCI_CPUIDLE_MAX_STATES 2
+
+struct davinci_ops {
+ void (*enter) (u32 flags);
+ void (*exit) (u32 flags);
+ u32 flags;
+};
+
+/* fields in davinci_ops.flags */
+#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
+
+static struct cpuidle_driver davinci_idle_driver = {
+ .name = "cpuidle-davinci",
+ .owner = THIS_MODULE,
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+
+#define DDR2_SDRCR_OFFSET 0xc
+#define DDR2_SRPD_BIT BIT(23)
+#define DDR2_LPMODEN_BIT BIT(31)
+
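+/*
+ * Enter/exit the DDR2 controller's low-power mode by toggling LPMODEN in
+ * the SDRCR register; SRPD (self-refresh power-down) is set as well when
+ * requested by the platform data.
+ */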
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+ u32 val;
+
+ val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+ if (enter) {
+ if (pdown)
+ val |= DDR2_SRPD_BIT;
+ else
+ val &= ~DDR2_SRPD_BIT;
+ val |= DDR2_LPMODEN_BIT;
+ } else {
+ val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+ }
+
+ __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}
+
+static void davinci_c2state_enter(u32 flags)
+{
+ davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
+}
+
+static void davinci_c2state_exit(u32 flags)
+{
+ davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
+}
+
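+/* Per-state hooks: only C2 (index 1) gates DDR2 power, state 0 is plain WFI */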
+static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
+ [1] = {
+ .enter = davinci_c2state_enter,
+ .exit = davinci_c2state_exit,
+ },
+};
+
+/* Actual code that puts the SoC in different idle states */
+static int davinci_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ struct davinci_ops *ops = cpuidle_get_statedata(state);
+ struct timeval before, after;
+ int idle_time;
+
+ local_irq_disable();
+ do_gettimeofday(&before);
+
+ if (ops && ops->enter)
+ ops->enter(ops->flags);
+ /* Wait for interrupt state */
+ cpu_do_idle();
+ if (ops && ops->exit)
+ ops->exit(ops->flags);
+
+ do_gettimeofday(&after);
+ local_irq_enable();
+ idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
+ (after.tv_usec - before.tv_usec);
+ return idle_time;
+}
+
+static int __init davinci_cpuidle_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct cpuidle_device *device;
+ struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
+ struct resource *ddr2_regs;
+ resource_size_t len;
+
+ device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "cannot get platform data\n");
+ return -ENOENT;
+ }
+
+ ddr2_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ddr2_regs) {
+ dev_err(&pdev->dev, "cannot get DDR2 controller register base");
+ return -ENODEV;
+ }
+
+ len = resource_size(ddr2_regs);
+
+ ddr2_regs = request_mem_region(ddr2_regs->start, len, ddr2_regs->name);
+ if (!ddr2_regs)
+ return -EBUSY;
+
+ ddr2_reg_base = ioremap(ddr2_regs->start, len);
+ if (!ddr2_reg_base) {
+ ret = -ENOMEM;
+ goto ioremap_fail;
+ }
+
+ ret = cpuidle_register_driver(&davinci_idle_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register driver\n");
+ goto driver_register_fail;
+ }
+
+ /* Wait for interrupt state */
+ device->states[0].enter = davinci_enter_idle;
+ device->states[0].exit_latency = 1;
+ device->states[0].target_residency = 10000;
+ device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+ strcpy(device->states[0].name, "WFI");
+ strcpy(device->states[0].desc, "Wait for interrupt");
+
+ /* Wait for interrupt and DDR self refresh state */
+ device->states[1].enter = davinci_enter_idle;
+ device->states[1].exit_latency = 10;
+ device->states[1].target_residency = 10000;
+ device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+ strcpy(device->states[1].name, "DDR SR");
+ strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
+ if (pdata->ddr2_pdown)
+ davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
+ cpuidle_set_statedata(&device->states[1], &davinci_states[1]);
+
+ device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+
+ ret = cpuidle_register_device(device);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register device\n");
+ goto device_register_fail;
+ }
+
+ return 0;
+
+device_register_fail:
+ cpuidle_unregister_driver(&davinci_idle_driver);
+driver_register_fail:
+ iounmap(ddr2_reg_base);
+ioremap_fail:
+ release_mem_region(ddr2_regs->start, len);
+ return ret;
+}
+
+static struct platform_driver davinci_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-davinci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init davinci_cpuidle_init(void)
+{
+ return platform_driver_probe(&davinci_cpuidle_driver,
+ davinci_cpuidle_probe);
+}
+device_initcall(davinci_cpuidle_init);
+
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 19b2748357f..b22b5cf0425 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -8,22 +8,17 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
-#include <linux/platform_device.h>
#include <asm/mach/map.h>
-#include <mach/clock.h>
#include <mach/psc.h>
-#include <mach/mux.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
-#include <mach/asp.h>
#include "clock.h"
#include "mux.h"
@@ -193,14 +188,14 @@ static struct clk uart1_clk = {
.name = "uart1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART1,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk uart2_clk = {
.name = "uart2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART2,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk spi0_clk = {
@@ -213,98 +208,98 @@ static struct clk spi1_clk = {
.name = "spi1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_SPI1,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk ecap0_clk = {
.name = "ecap0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk ecap1_clk = {
.name = "ecap1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk ecap2_clk = {
.name = "ecap2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_ECAP,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk pwm0_clk = {
.name = "pwm0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk pwm1_clk = {
.name = "pwm1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk pwm2_clk = {
.name = "pwm2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_PWM,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk eqep0_clk = {
.name = "eqep0",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_EQEP,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk eqep1_clk = {
.name = "eqep1",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_EQEP,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk lcdc_clk = {
.name = "lcdc",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_LCDC,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk mcasp0_clk = {
.name = "mcasp0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_McASP0,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk mcasp1_clk = {
.name = "mcasp1",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_McASP1,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk mcasp2_clk = {
.name = "mcasp2",
.parent = &pll0_sysclk2,
.lpsc = DA830_LPSC1_McASP2,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk usb20_clk = {
.name = "usb20",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_USB20,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk aemif_clk = {
@@ -332,36 +327,36 @@ static struct clk emac_clk = {
.name = "emac",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_CPGMAC,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk gpio_clk = {
.name = "gpio",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_GPIO,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk i2c1_clk = {
.name = "i2c1",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_I2C,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk usb11_clk = {
.name = "usb11",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_USB11,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk emif3_clk = {
.name = "emif3",
.parent = &pll0_sysclk5,
.lpsc = DA8XX_LPSC1_EMIF3C,
+ .gpsc = 1,
.flags = ALWAYS_ENABLED,
- .psc_ctlr = 1,
};
static struct clk arm_clk = {
@@ -411,7 +406,7 @@ static struct davinci_clk da830_clks[] = {
CLK(NULL, "pwm2", &pwm2_clk),
CLK("eqep.0", NULL, &eqep0_clk),
CLK("eqep.1", NULL, &eqep1_clk),
- CLK("da830_lcdc", NULL, &lcdc_clk),
+ CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
@@ -1143,7 +1138,21 @@ static struct davinci_id da830_ids[] = {
.part_no = 0xb7df,
.manufacturer = 0x017, /* 0x02f >> 1 */
.cpu_id = DAVINCI_CPU_ID_DA830,
- .name = "da830/omap l137",
+ .name = "da830/omap-l137 rev1.0",
+ },
+ {
+ .variant = 0x8,
+ .part_no = 0xb7df,
+ .manufacturer = 0x017,
+ .cpu_id = DAVINCI_CPU_ID_DA830,
+ .name = "da830/omap-l137 rev1.1",
+ },
+ {
+ .variant = 0x9,
+ .part_no = 0xb7df,
+ .manufacturer = 0x017,
+ .cpu_id = DAVINCI_CPU_ID_DA830,
+ .name = "da830/omap-l137 rev2.0",
},
};
@@ -1178,13 +1187,11 @@ static struct davinci_timer_info da830_timer_info = {
static struct davinci_soc_info davinci_soc_info_da830 = {
.io_desc = da830_io_desc,
.io_desc_num = ARRAY_SIZE(da830_io_desc),
- .jtag_id_base = IO_ADDRESS(DA8XX_JTAG_ID_REG),
.ids = da830_ids,
.ids_num = ARRAY_SIZE(da830_ids),
.cpu_clks = da830_clks,
.psc_bases = da830_psc_bases,
.psc_bases_num = ARRAY_SIZE(da830_psc_bases),
- .pinmux_base = IO_ADDRESS(DA8XX_BOOT_CFG_BASE + 0x120),
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
.intc_base = (void __iomem *)DA8XX_CP_INTC_VIRT,
@@ -1201,5 +1208,13 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
void __init da830_init(void)
{
+ da8xx_syscfg_base = ioremap(DA8XX_SYSCFG_BASE, SZ_4K);
+ if (WARN(!da8xx_syscfg_base, "Unable to map syscfg module"))
+ return;
+
+ davinci_soc_info_da830.jtag_id_base =
+ DA8XX_SYSCFG_VIRT(DA8XX_JTAG_ID_REG);
+ davinci_soc_info_da830.pinmux_base = DA8XX_SYSCFG_VIRT(0x120);
+
davinci_common_init(&davinci_soc_info_da830);
}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 192d719a47d..717806c6cef 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -11,31 +11,41 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/cpufreq.h>
+#include <linux/regulator/consumer.h>
#include <asm/mach/map.h>
-#include <mach/clock.h>
#include <mach/psc.h>
-#include <mach/mux.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
+#include <mach/cpufreq.h>
#include "clock.h"
#include "mux.h"
+/* SoC specific clock flags */
+#define DA850_CLK_ASYNC3 BIT(16)
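+/*
+ * Clocks flagged DA850_CLK_ASYNC3 belong to the DA850 ASYNC3 clock domain,
+ * whose source is selected via the CFGCHIP3 ASYNC3_CLKSRC bit defined below.
+ */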
+
#define DA850_PLL1_BASE 0x01e1a000
#define DA850_TIMER64P2_BASE 0x01f0c000
#define DA850_TIMER64P3_BASE 0x01f0d000
#define DA850_REF_FREQ 24000000
+#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
+#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
+
+static int da850_set_armrate(struct clk *clk, unsigned long rate);
+static int da850_round_armrate(struct clk *clk, unsigned long rate);
+static int da850_set_pll0rate(struct clk *clk, unsigned long armrate);
+
static struct pll_data pll0_data = {
.num = 1,
.phys_base = DA8XX_PLL0_BASE,
@@ -52,6 +62,7 @@ static struct clk pll0_clk = {
.parent = &ref_clk,
.pll_data = &pll0_data,
.flags = CLK_PLL,
+ .set_rate = da850_set_pll0rate,
};
static struct clk pll0_aux_clk = {
@@ -210,16 +221,16 @@ static struct clk tpcc1_clk = {
.name = "tpcc1",
.parent = &pll0_sysclk2,
.lpsc = DA850_LPSC1_TPCC1,
+ .gpsc = 1,
.flags = CLK_PSC | ALWAYS_ENABLED,
- .psc_ctlr = 1,
};
static struct clk tptc2_clk = {
.name = "tptc2",
.parent = &pll0_sysclk2,
.lpsc = DA850_LPSC1_TPTC2,
+ .gpsc = 1,
.flags = ALWAYS_ENABLED,
- .psc_ctlr = 1,
};
static struct clk uart0_clk = {
@@ -232,14 +243,16 @@ static struct clk uart1_clk = {
.name = "uart1",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART1,
- .psc_ctlr = 1,
+ .gpsc = 1,
+ .flags = DA850_CLK_ASYNC3,
};
static struct clk uart2_clk = {
.name = "uart2",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_UART2,
- .psc_ctlr = 1,
+ .gpsc = 1,
+ .flags = DA850_CLK_ASYNC3,
};
static struct clk aintc_clk = {
@@ -253,22 +266,22 @@ static struct clk gpio_clk = {
.name = "gpio",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_GPIO,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk i2c1_clk = {
.name = "i2c1",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_I2C,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk emif3_clk = {
.name = "emif3",
.parent = &pll0_sysclk5,
.lpsc = DA8XX_LPSC1_EMIF3C,
+ .gpsc = 1,
.flags = ALWAYS_ENABLED,
- .psc_ctlr = 1,
};
static struct clk arm_clk = {
@@ -276,6 +289,8 @@ static struct clk arm_clk = {
.parent = &pll0_sysclk6,
.lpsc = DA8XX_LPSC0_ARM,
.flags = ALWAYS_ENABLED,
+ .set_rate = da850_set_armrate,
+ .round_rate = da850_round_armrate,
};
static struct clk rmii_clk = {
@@ -287,21 +302,22 @@ static struct clk emac_clk = {
.name = "emac",
.parent = &pll0_sysclk4,
.lpsc = DA8XX_LPSC1_CPGMAC,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk mcasp_clk = {
.name = "mcasp",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_McASP0,
- .psc_ctlr = 1,
+ .gpsc = 1,
+ .flags = DA850_CLK_ASYNC3,
};
static struct clk lcdc_clk = {
.name = "lcdc",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC1_LCDC,
- .psc_ctlr = 1,
+ .gpsc = 1,
};
static struct clk mmcsd_clk = {
@@ -404,6 +420,14 @@ static const struct mux_config da850_pins[] = {
MUX_CFG(DA850, MII_RXD_0, 3, 28, 15, 8, false)
MUX_CFG(DA850, MDIO_CLK, 4, 0, 15, 8, false)
MUX_CFG(DA850, MDIO_D, 4, 4, 15, 8, false)
+ MUX_CFG(DA850, RMII_TXD_0, 14, 12, 15, 8, false)
+ MUX_CFG(DA850, RMII_TXD_1, 14, 8, 15, 8, false)
+ MUX_CFG(DA850, RMII_TXEN, 14, 16, 15, 8, false)
+ MUX_CFG(DA850, RMII_CRS_DV, 15, 4, 15, 8, false)
+ MUX_CFG(DA850, RMII_RXD_0, 14, 24, 15, 8, false)
+ MUX_CFG(DA850, RMII_RXD_1, 14, 20, 15, 8, false)
+ MUX_CFG(DA850, RMII_RXER, 14, 28, 15, 8, false)
+ MUX_CFG(DA850, RMII_MHZ_50_CLK, 15, 0, 15, 0, false)
/* McASP function */
MUX_CFG(DA850, ACLKR, 0, 0, 15, 1, false)
MUX_CFG(DA850, ACLKX, 0, 4, 15, 1, false)
@@ -506,8 +530,9 @@ static const struct mux_config da850_pins[] = {
MUX_CFG(DA850, EMA_WAIT_1, 6, 24, 15, 1, false)
MUX_CFG(DA850, NEMA_CS_2, 7, 0, 15, 1, false)
/* GPIO function */
+ MUX_CFG(DA850, GPIO2_6, 6, 4, 15, 8, false)
+ MUX_CFG(DA850, GPIO2_8, 5, 28, 15, 8, false)
MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false)
- MUX_CFG(DA850, GPIO8_10, 18, 28, 15, 8, false)
MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false)
MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false)
#endif
@@ -547,6 +572,14 @@ const short da850_cpgmac_pins[] __initdata = {
-1
};
+const short da850_rmii_pins[] __initdata = {
+ DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
+ DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
+ DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
+ DA850_MDIO_D,
+ -1
+};
+
const short da850_mcasp_pins[] __initdata = {
DA850_AHCLKX, DA850_ACLKX, DA850_AFSX,
DA850_AHCLKR, DA850_ACLKR, DA850_AFSR, DA850_AMUTE,
@@ -555,12 +588,11 @@ const short da850_mcasp_pins[] __initdata = {
};
const short da850_lcdcntl_pins[] __initdata = {
- DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3, DA850_LCD_D_4,
- DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7, DA850_LCD_D_8,
- DA850_LCD_D_9, DA850_LCD_D_10, DA850_LCD_D_11, DA850_LCD_D_12,
- DA850_LCD_D_13, DA850_LCD_D_14, DA850_LCD_D_15, DA850_LCD_PCLK,
- DA850_LCD_HSYNC, DA850_LCD_VSYNC, DA850_NLCD_AC_ENB_CS, DA850_GPIO2_15,
- DA850_GPIO8_10,
+ DA850_LCD_D_0, DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3,
+ DA850_LCD_D_4, DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7,
+ DA850_LCD_D_8, DA850_LCD_D_9, DA850_LCD_D_10, DA850_LCD_D_11,
+ DA850_LCD_D_12, DA850_LCD_D_13, DA850_LCD_D_14, DA850_LCD_D_15,
+ DA850_LCD_PCLK, DA850_LCD_HSYNC, DA850_LCD_VSYNC, DA850_NLCD_AC_ENB_CS,
-1
};
@@ -790,16 +822,221 @@ static struct davinci_timer_info da850_timer_info = {
.clocksource_id = T0_TOP,
};
+static void da850_set_async3_src(int pllnum)
+{
+ struct clk *clk, *newparent = pllnum ? &pll1_sysclk2 : &pll0_sysclk2;
+ struct davinci_clk *c;
+ unsigned int v;
+ int ret;
+
+ for (c = da850_clks; c->lk.clk; c++) {
+ clk = c->lk.clk;
+ if (clk->flags & DA850_CLK_ASYNC3) {
+ ret = clk_set_parent(clk, newparent);
+ WARN(ret, "DA850: unable to re-parent clock %s",
+ clk->name);
+ }
+ }
+
+ v = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG));
+ if (pllnum)
+ v |= CFGCHIP3_ASYNC3_CLKSRC;
+ else
+ v &= ~CFGCHIP3_ASYNC3_CLKSRC;
+ __raw_writel(v, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG));
+}
+
+#ifdef CONFIG_CPU_FREQ
+/*
+ * Notes:
+ * According to the TRM, minimum PLLM results in maximum power savings.
+ * The OPP definitions below should keep the PLLM as low as possible.
+ *
+ * The output of the PLLM must be between 400 and 600 MHz.
+ * This rules out a prediv of anything but divide-by-one for a 24 MHz OSC input.
+ */
+struct da850_opp {
+	unsigned int	freq;	/* in kHz */
+ unsigned int prediv;
+ unsigned int mult;
+ unsigned int postdiv;
+ unsigned int cvdd_min; /* in uV */
+ unsigned int cvdd_max; /* in uV */
+};
+
+static const struct da850_opp da850_opp_300 = {
+ .freq = 300000,
+ .prediv = 1,
+ .mult = 25,
+ .postdiv = 2,
+ .cvdd_min = 1140000,
+ .cvdd_max = 1320000,
+};
+
+static const struct da850_opp da850_opp_200 = {
+ .freq = 200000,
+ .prediv = 1,
+ .mult = 25,
+ .postdiv = 3,
+ .cvdd_min = 1050000,
+ .cvdd_max = 1160000,
+};
+
+static const struct da850_opp da850_opp_96 = {
+ .freq = 96000,
+ .prediv = 1,
+ .mult = 20,
+ .postdiv = 5,
+ .cvdd_min = 950000,
+ .cvdd_max = 1050000,
+};
+
+#define OPP(freq) \
+ { \
+ .index = (unsigned int) &da850_opp_##freq, \
+ .frequency = freq * 1000, \
+ }
+
+static struct cpufreq_frequency_table da850_freq_table[] = {
+ OPP(300),
+ OPP(200),
+ OPP(96),
+ {
+ .index = 0,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
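
As a rough cross-check of the OPP entries above, the resulting ARM rate follows from the 24 MHz reference clock divided by prediv, multiplied by mult and divided by postdiv. A minimal sketch, not part of the patch; da850_opp_rate_khz is a made-up helper name used only for illustration:

static unsigned long da850_opp_rate_khz(const struct da850_opp *opp)
{
	/* e.g. da850_opp_300: 24000 kHz / 1 * 25 / 2 = 300000 kHz (300 MHz) */
	return DA850_REF_FREQ / 1000 / opp->prediv * opp->mult / opp->postdiv;
}
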
+
+#ifdef CONFIG_REGULATOR
+static struct regulator *cvdd;
+
+static int da850_set_voltage(unsigned int index)
+{
+ struct da850_opp *opp;
+
+ if (!cvdd)
+ return -ENODEV;
+
+ opp = (struct da850_opp *) da850_freq_table[index].index;
+
+ return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max);
+}
+
+static int da850_regulator_init(void)
+{
+ cvdd = regulator_get(NULL, "cvdd");
+ if (WARN(IS_ERR(cvdd), "Unable to obtain voltage regulator for CVDD;"
+ " voltage scaling unsupported\n")) {
+ return PTR_ERR(cvdd);
+ }
+
+ return 0;
+}
+#endif
+
+static struct davinci_cpufreq_config cpufreq_info = {
+ .freq_table = &da850_freq_table[0],
+#ifdef CONFIG_REGULATOR
+ .init = da850_regulator_init,
+ .set_voltage = da850_set_voltage,
+#endif
+};
+
+static struct platform_device da850_cpufreq_device = {
+ .name = "cpufreq-davinci",
+ .dev = {
+ .platform_data = &cpufreq_info,
+ },
+};
+
+int __init da850_register_cpufreq(void)
+{
+ return platform_device_register(&da850_cpufreq_device);
+}
+
+static int da850_round_armrate(struct clk *clk, unsigned long rate)
+{
+ int i, ret = 0, diff;
+ unsigned int best = (unsigned int) -1;
+
+ rate /= 1000; /* convert to kHz */
+
+ for (i = 0; da850_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ diff = da850_freq_table[i].frequency - rate;
+ if (diff < 0)
+ diff = -diff;
+
+ if (diff < best) {
+ best = diff;
+ ret = da850_freq_table[i].frequency;
+ }
+ }
+
+ return ret * 1000;
+}
+
+static int da850_set_armrate(struct clk *clk, unsigned long index)
+{
+ struct clk *pllclk = &pll0_clk;
+
+ return clk_set_rate(pllclk, index);
+}
+
+static int da850_set_pll0rate(struct clk *clk, unsigned long index)
+{
+ unsigned int prediv, mult, postdiv;
+ struct da850_opp *opp;
+ struct pll_data *pll = clk->pll_data;
+ unsigned int v;
+ int ret;
+
+ opp = (struct da850_opp *) da850_freq_table[index].index;
+ prediv = opp->prediv;
+ mult = opp->mult;
+ postdiv = opp->postdiv;
+
+ /* Unlock writing to PLL registers */
+ v = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP0_REG));
+ v &= ~CFGCHIP0_PLL_MASTER_LOCK;
+ __raw_writel(v, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP0_REG));
+
+ ret = davinci_set_pllrate(pll, prediv, mult, postdiv);
+ if (WARN_ON(ret))
+ return ret;
+
+ return 0;
+}
+#else
+int __init da850_register_cpufreq(void)
+{
+ return 0;
+}
+
+static int da850_set_armrate(struct clk *clk, unsigned long rate)
+{
+ return -EINVAL;
+}
+
+static int da850_set_pll0rate(struct clk *clk, unsigned long armrate)
+{
+ return -EINVAL;
+}
+
+static int da850_round_armrate(struct clk *clk, unsigned long rate)
+{
+ return clk->rate;
+}
+#endif
+
static struct davinci_soc_info davinci_soc_info_da850 = {
.io_desc = da850_io_desc,
.io_desc_num = ARRAY_SIZE(da850_io_desc),
- .jtag_id_base = IO_ADDRESS(DA8XX_JTAG_ID_REG),
.ids = da850_ids,
.ids_num = ARRAY_SIZE(da850_ids),
.cpu_clks = da850_clks,
.psc_bases = da850_psc_bases,
.psc_bases_num = ARRAY_SIZE(da850_psc_bases),
- .pinmux_base = IO_ADDRESS(DA8XX_BOOT_CFG_BASE + 0x120),
.pinmux_pins = da850_pins,
.pinmux_pins_num = ARRAY_SIZE(da850_pins),
.intc_base = (void __iomem *)DA8XX_CP_INTC_VIRT,
@@ -816,5 +1053,22 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
void __init da850_init(void)
{
+ da8xx_syscfg_base = ioremap(DA8XX_SYSCFG_BASE, SZ_4K);
+ if (WARN(!da8xx_syscfg_base, "Unable to map syscfg module"))
+ return;
+
+ davinci_soc_info_da850.jtag_id_base =
+ DA8XX_SYSCFG_VIRT(DA8XX_JTAG_ID_REG);
+ davinci_soc_info_da850.pinmux_base = DA8XX_SYSCFG_VIRT(0x120);
+
davinci_common_init(&davinci_soc_info_da850);
+
+ /*
+ * Move the clock source of Async3 domain to PLL1 SYSCLK2.
+	 * This helps keep the peripherals in this domain insulated
+	 * from CPU frequency changes caused by DVFS. The firmware sets
+	 * both PLL0 and PLL1 to the same frequency, so there should not
+	 * be any noticeable change even in non-DVFS use cases.
+ */
+ da850_set_async3_src(1);
}
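
On the board side, the DVFS support added above is opted into by calling da850_register_cpufreq() from board init code. A hypothetical fragment, assuming a DA850 EVM style machine check (not part of this patch):

/* sketch only: needs <asm/mach-types.h> for the assumed machine check */
static int __init da850_evm_init_cpufreq(void)
{
	if (!machine_is_davinci_da850_evm())
		return 0;

	return da850_register_cpufreq();
}
device_initcall(da850_evm_init_cpufreq);
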
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 58ad5b66fd6..dd2d32c4ce8 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -10,8 +10,6 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -21,7 +19,7 @@
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
-#include <video/da8xx-fb.h>
+#include <mach/cpuidle.h>
#include "clock.h"
@@ -30,6 +28,7 @@
#define DA8XX_TPTC1_BASE 0x01c08400
#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */
#define DA8XX_I2C0_BASE 0x01c22000
+#define DA8XX_RTC_BASE			0x01c23000
#define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000
#define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000
#define DA8XX_EMAC_CPGMAC_BASE 0x01e23000
@@ -43,6 +42,8 @@
#define DA8XX_MDIO_REG_OFFSET 0x4000
#define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K
+void __iomem *da8xx_syscfg_base;
+
static struct plat_serial8250_port da8xx_serial_pdata[] = {
{
.mapbase = DA8XX_UART0_BASE,
@@ -282,6 +283,11 @@ static struct platform_device da8xx_emac_device = {
.resource = da8xx_emac_resources,
};
+int __init da8xx_register_emac(void)
+{
+ return platform_device_register(&da8xx_emac_device);
+}
+
static struct resource da830_mcasp1_resources[] = {
{
.name = "mcasp1",
@@ -338,12 +344,7 @@ static struct platform_device da850_mcasp_device = {
.resource = da850_mcasp_resources,
};
-int __init da8xx_register_emac(void)
-{
- return platform_device_register(&da8xx_emac_device);
-}
-
-void __init da8xx_init_mcasp(int id, struct snd_platform_data *pdata)
+void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
{
/* DA830/OMAP-L137 has 3 instances of McASP */
if (cpu_is_davinci_da830() && id == 1) {
@@ -379,10 +380,16 @@ static struct lcd_ctrl_config lcd_cfg = {
.raster_order = 0,
};
-static struct da8xx_lcdc_platform_data da850_evm_lcdc_pdata = {
- .manu_name = "sharp",
- .controller_data = &lcd_cfg,
- .type = "Sharp_LK043T1DG01",
+struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = {
+ .manu_name = "sharp",
+ .controller_data = &lcd_cfg,
+ .type = "Sharp_LCD035Q3DG01",
+};
+
+struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata = {
+ .manu_name = "sharp",
+ .controller_data = &lcd_cfg,
+ .type = "Sharp_LK043T1DG01",
};
static struct resource da8xx_lcdc_resources[] = {
@@ -398,19 +405,17 @@ static struct resource da8xx_lcdc_resources[] = {
},
};
-static struct platform_device da850_lcdc_device = {
+static struct platform_device da8xx_lcdc_device = {
.name = "da8xx_lcdc",
.id = 0,
.num_resources = ARRAY_SIZE(da8xx_lcdc_resources),
.resource = da8xx_lcdc_resources,
- .dev = {
- .platform_data = &da850_evm_lcdc_pdata,
- }
};
-int __init da8xx_register_lcdc(void)
+int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
{
- return platform_device_register(&da850_lcdc_device);
+ da8xx_lcdc_device.dev.platform_data = pdata;
+ return platform_device_register(&da8xx_lcdc_device);
}
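
With the platform data now passed in, a board selects its panel at the call site using one of the exported pdata structures above. A hypothetical call site, not taken from the patch:

	int ret;

	/* pick the 4.3" Sharp panel on an assumed DA850-style board */
	ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
	if (ret)
		pr_warning("da8xx_lcdc registration failed: %d\n", ret);
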
static struct resource da8xx_mmcsd0_resources[] = {
@@ -448,3 +453,66 @@ int __init da8xx_register_mmcsd0(struct davinci_mmc_config *config)
da8xx_mmcsd0_device.dev.platform_data = config;
return platform_device_register(&da8xx_mmcsd0_device);
}
+
+static struct resource da8xx_rtc_resources[] = {
+ {
+ .start = DA8XX_RTC_BASE,
+ .end = DA8XX_RTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* timer irq */
+ .start = IRQ_DA8XX_RTC,
+ .end = IRQ_DA8XX_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+ { /* alarm irq */
+ .start = IRQ_DA8XX_RTC,
+ .end = IRQ_DA8XX_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device da8xx_rtc_device = {
+ .name = "omap_rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(da8xx_rtc_resources),
+ .resource = da8xx_rtc_resources,
+};
+
+int da8xx_register_rtc(void)
+{
+ /* Unlock the rtc's registers */
+ __raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c));
+ __raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70));
+
+ return platform_device_register(&da8xx_rtc_device);
+}
+
+static struct resource da8xx_cpuidle_resources[] = {
+ {
+ .start = DA8XX_DDR2_CTL_BASE,
+ .end = DA8XX_DDR2_CTL_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+/* DA8XX devices support DDR2 power down */
+static struct davinci_cpuidle_config da8xx_cpuidle_pdata = {
+ .ddr2_pdown = 1,
+};
+
+static struct platform_device da8xx_cpuidle_device = {
+ .name = "cpuidle-davinci",
+ .num_resources = ARRAY_SIZE(da8xx_cpuidle_resources),
+ .resource = da8xx_cpuidle_resources,
+ .dev = {
+ .platform_data = &da8xx_cpuidle_pdata,
+ },
+};
+
+int __init da8xx_register_cpuidle(void)
+{
+ return platform_device_register(&da8xx_cpuidle_device);
+}
+
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index a55b650db71..147949650c2 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -9,15 +9,11 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <asm/mach/map.h>
-
#include <mach/hardware.h>
#include <mach/i2c.h>
#include <mach/irqs.h>
@@ -177,7 +173,7 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
mmcsd1_resources[0].start = DM365_MMCSD1_BASE;
mmcsd1_resources[0].end = DM365_MMCSD1_BASE +
SZ_4K - 1;
- mmcsd0_resources[2].start = IRQ_DM365_SDIOINT1;
+ mmcsd1_resources[2].start = IRQ_DM365_SDIOINT1;
} else
break;
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 059670018af..dedf4d4f3a2 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -8,7 +8,6 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
@@ -21,7 +20,6 @@
#include <asm/mach/map.h>
#include <mach/dm355.h>
-#include <mach/clock.h>
#include <mach/cputype.h>
#include <mach/edma.h>
#include <mach/psc.h>
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index e8151743470..2ec619ec165 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -12,7 +12,6 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
@@ -23,7 +22,6 @@
#include <asm/mach/map.h>
#include <mach/dm365.h>
-#include <mach/clock.h>
#include <mach/cputype.h>
#include <mach/edma.h>
#include <mach/psc.h>
@@ -32,6 +30,8 @@
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
+#include <mach/asp.h>
+#include <mach/keyscan.h>
#include "clock.h"
#include "mux.h"
@@ -369,7 +369,7 @@ static struct clk timer3_clk = {
static struct clk usb_clk = {
.name = "usb",
- .parent = &pll2_sysclk1,
+ .parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_USB,
};
@@ -456,7 +456,7 @@ static struct davinci_clk dm365_clks[] = {
CLK(NULL, "usb", &usb_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("voice_codec", NULL, &voicecodec_clk),
- CLK("soc-audio.0", NULL, &asp0_clk),
+ CLK("davinci-asp.0", NULL, &asp0_clk),
CLK(NULL, "rto", &rto_clk),
CLK(NULL, "mjcp", &mjcp_clk),
CLK(NULL, NULL, NULL),
@@ -531,7 +531,7 @@ MUX_CFG(DM365, EMAC_CRS, 3, 2, 1, 1, false)
MUX_CFG(DM365, EMAC_MDIO, 3, 1, 1, 1, false)
MUX_CFG(DM365, EMAC_MDCLK, 3, 0, 1, 1, false)
-MUX_CFG(DM365, KEYPAD, 2, 0, 0x3f, 0x3f, false)
+MUX_CFG(DM365, KEYSCAN, 2, 0, 0x3f, 0x3f, false)
MUX_CFG(DM365, PWM0, 1, 0, 3, 2, false)
MUX_CFG(DM365, PWM0_G23, 3, 26, 3, 3, false)
@@ -603,6 +603,9 @@ INT_CFG(DM365, INT_IMX1_ENABLE, 24, 1, 1, false)
INT_CFG(DM365, INT_IMX1_DISABLE, 24, 1, 0, false)
INT_CFG(DM365, INT_NSF_ENABLE, 25, 1, 1, false)
INT_CFG(DM365, INT_NSF_DISABLE, 25, 1, 0, false)
+
+EVT_CFG(DM365, EVT2_ASP_TX, 0, 1, 0, false)
+EVT_CFG(DM365, EVT3_ASP_RX, 1, 1, 0, false)
#endif
};
@@ -696,6 +699,7 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
[IRQ_I2C] = 3,
[IRQ_UARTINT0] = 3,
[IRQ_UARTINT1] = 3,
+ [IRQ_DM365_RTCINT] = 3,
[IRQ_DM365_SPIINT0_0] = 3,
[IRQ_DM365_SPIINT3_0] = 3,
[IRQ_DM365_GPIO0] = 3,
@@ -806,6 +810,50 @@ static struct platform_device dm365_edma_device = {
.resource = edma_resources,
};
+static struct resource dm365_asp_resources[] = {
+ {
+ .start = DAVINCI_DM365_ASP0_BASE,
+ .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = DAVINCI_DMA_ASP0_TX,
+ .end = DAVINCI_DMA_ASP0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = DAVINCI_DMA_ASP0_RX,
+ .end = DAVINCI_DMA_ASP0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device dm365_asp_device = {
+ .name = "davinci-asp",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm365_asp_resources),
+ .resource = dm365_asp_resources,
+};
+
+static struct resource dm365_rtc_resources[] = {
+ {
+ .start = DM365_RTC_BASE,
+ .end = DM365_RTC_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_DM365_RTCINT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dm365_rtc_device = {
+ .name = "rtc_davinci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm365_rtc_resources),
+ .resource = dm365_rtc_resources,
+};
+
static struct map_desc dm365_io_desc[] = {
{
.virtual = IO_VIRT,
@@ -822,6 +870,28 @@ static struct map_desc dm365_io_desc[] = {
},
};
+static struct resource dm365_ks_resources[] = {
+ {
+ /* registers */
+ .start = DM365_KEYSCAN_BASE,
+ .end = DM365_KEYSCAN_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* interrupt */
+ .start = IRQ_DM365_KEYINT,
+ .end = IRQ_DM365_KEYINT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dm365_ks_device = {
+ .name = "davinci_keyscan",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dm365_ks_resources),
+ .resource = dm365_ks_resources,
+};
+
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id dm365_ids[] = {
{
@@ -907,6 +977,33 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.sram_len = SZ_32K,
};
+void __init dm365_init_asp(struct snd_platform_data *pdata)
+{
+ davinci_cfg_reg(DM365_MCBSP0_BDX);
+ davinci_cfg_reg(DM365_MCBSP0_X);
+ davinci_cfg_reg(DM365_MCBSP0_BFSX);
+ davinci_cfg_reg(DM365_MCBSP0_BDR);
+ davinci_cfg_reg(DM365_MCBSP0_R);
+ davinci_cfg_reg(DM365_MCBSP0_BFSR);
+ davinci_cfg_reg(DM365_EVT2_ASP_TX);
+ davinci_cfg_reg(DM365_EVT3_ASP_RX);
+ dm365_asp_device.dev.platform_data = pdata;
+ platform_device_register(&dm365_asp_device);
+}
+
+void __init dm365_init_ks(struct davinci_ks_platform_data *pdata)
+{
+ davinci_cfg_reg(DM365_KEYSCAN);
+ dm365_ks_device.dev.platform_data = pdata;
+ platform_device_register(&dm365_ks_device);
+}
+
+void __init dm365_init_rtc(void)
+{
+ davinci_cfg_reg(DM365_INT_PRTCSS);
+ platform_device_register(&dm365_rtc_device);
+}
+
void __init dm365_init(void)
{
davinci_common_init(&davinci_soc_info_dm365);
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index d6e0fa5a8d8..2cd008156de 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -8,7 +8,6 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
@@ -18,7 +17,6 @@
#include <asm/mach/map.h>
#include <mach/dm644x.h>
-#include <mach/clock.h>
#include <mach/cputype.h>
#include <mach/edma.h>
#include <mach/irqs.h>
@@ -370,6 +368,11 @@ MUX_CFG(DM644X, ATAEN_DISABLE, 0, 17, 1, 0, true)
MUX_CFG(DM644X, HPIEN_DISABLE, 0, 29, 1, 0, true)
MUX_CFG(DM644X, AEAW, 0, 0, 31, 31, true)
+MUX_CFG(DM644X, AEAW0, 0, 0, 1, 0, true)
+MUX_CFG(DM644X, AEAW1, 0, 1, 1, 0, true)
+MUX_CFG(DM644X, AEAW2, 0, 2, 1, 0, true)
+MUX_CFG(DM644X, AEAW3, 0, 3, 1, 0, true)
+MUX_CFG(DM644X, AEAW4, 0, 4, 1, 0, true)
MUX_CFG(DM644X, MSTK, 1, 9, 1, 0, false)
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0976049c7b3..829a44bcf79 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -8,7 +8,6 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
@@ -18,7 +17,6 @@
#include <asm/mach/map.h>
#include <mach/dm646x.h>
-#include <mach/clock.h>
#include <mach/cputype.h>
#include <mach/edma.h>
#include <mach/irqs.h>
@@ -789,7 +787,14 @@ static struct davinci_id dm646x_ids[] = {
.part_no = 0xb770,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DM6467,
- .name = "dm6467",
+ .name = "dm6467_rev1.x",
+ },
+ {
+ .variant = 0x1,
+ .part_no = 0xb770,
+ .manufacturer = 0x017,
+ .cpu_id = DAVINCI_CPU_ID_DM6467,
+ .name = "dm6467_rev3.x",
},
};
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index f2e57d27295..648fbb760ae 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -18,22 +18,13 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/compiler.h>
#include <linux/io.h>
-#include <mach/cputype.h>
-#include <mach/memory.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <mach/edma.h>
-#include <mach/mux.h>
-
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT 0x00
@@ -509,43 +500,59 @@ static irqreturn_t dma_tc1err_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int reserve_contiguous_params(int ctlr, unsigned int id,
- unsigned int num_params,
- unsigned int start_param)
+static int reserve_contiguous_slots(int ctlr, unsigned int id,
+ unsigned int num_slots,
+ unsigned int start_slot)
{
int i, j;
- unsigned int count = num_params;
+ unsigned int count = num_slots;
+ int stop_slot = start_slot;
+ DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
- for (i = start_param; i < edma_info[ctlr]->num_slots; ++i) {
+ for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
j = EDMA_CHAN_SLOT(i);
- if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse))
+ if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
+ /* Record our current beginning slot */
+ if (count == num_slots)
+ stop_slot = i;
+
count--;
+ set_bit(j, tmp_inuse);
+
if (count == 0)
break;
- else if (id == EDMA_CONT_PARAMS_FIXED_EXACT)
- break;
- else
- count = num_params;
+ } else {
+ clear_bit(j, tmp_inuse);
+
+ if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
+ stop_slot = i;
+ break;
+ } else
+ count = num_slots;
+ }
}
/*
* We have to clear any bits that we set
- * if we run out parameter RAMs, i.e we do find a set
- * of contiguous parameter RAMs but do not find the exact number
- * requested as we may reach the total number of parameter RAMs
+	 * if we run out of parameter RAM slots, i.e. we find a set of
+	 * contiguous parameter RAM slots but not the exact number requested,
+	 * because we may have reached the total number of parameter RAM slots
*/
- if (count) {
- for (j = i - num_params + count + 1; j <= i ; ++j)
+ if (i == edma_info[ctlr]->num_slots)
+ stop_slot = i;
+
+ for (j = start_slot; j < stop_slot; j++)
+ if (test_bit(j, tmp_inuse))
clear_bit(j, edma_info[ctlr]->edma_inuse);
+ if (count)
return -EBUSY;
- }
- for (j = i - num_params + 1; j <= i; ++j)
+ for (j = i - num_slots + 1; j <= i; ++j)
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
&dummy_paramset, PARM_SIZE);
- return EDMA_CTLR_CHAN(ctlr, i - num_params + 1);
+ return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}
/*-----------------------------------------------------------------------*/
@@ -743,26 +750,27 @@ EXPORT_SYMBOL(edma_free_slot);
/**
* edma_alloc_cont_slots- alloc contiguous parameter RAM slots
* The API will return the starting point of a set of
- * contiguous PARAM's that have been requested
+ * contiguous parameter RAM slots that have been requested
*
* @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
* or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
- * @count: number of contiguous Paramter RAM's
- * @param - the start value of Parameter RAM that should be passed if id
+ * @count: number of contiguous Parameter RAM slots
+ * @slot - the starting Parameter RAM slot that should be passed if id
* is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
*
* If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
- * contiguous Parameter RAMs from parameter RAM 64 in the case of DaVinci SOCs
- * and 32 in the case of Primus
+ * contiguous Parameter RAM slots from slot 64 in the case of
+ * DaVinci SOCs and from slot 32 in the case of DA8xx SOCs.
*
* If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
- * set of contiguous parameter RAMs from the "param" that is passed as an
+ * set of contiguous parameter RAM slots from the "slot" that is passed as an
* argument to the API.
*
* If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
- * starts looking for a set of contiguous parameter RAMs from the "param"
+ * to find a set of contiguous parameter RAM slots from the "slot"
* that is passed as an argument to the API. On failure the API will try to
- * find a set of contiguous Parameter RAMs in the remaining Parameter RAMs
+ * find a set of contiguous Parameter RAM slots from the remaining Parameter
+ * RAM slots
*/
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
@@ -771,12 +779,13 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
* the number of channels and lesser than the total number
* of slots
*/
- if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots)
+ if ((id != EDMA_CONT_PARAMS_ANY) &&
+ (slot < edma_info[ctlr]->num_channels ||
+ slot >= edma_info[ctlr]->num_slots))
return -EINVAL;
/*
- * The number of parameter RAMs requested cannot be less than 1
+ * The number of parameter RAM slots requested cannot be less than 1
* and cannot be more than the number of slots minus the number of
* channels
*/
@@ -786,11 +795,11 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
switch (id) {
case EDMA_CONT_PARAMS_ANY:
- return reserve_contiguous_params(ctlr, id, count,
+ return reserve_contiguous_slots(ctlr, id, count,
edma_info[ctlr]->num_channels);
case EDMA_CONT_PARAMS_FIXED_EXACT:
case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
- return reserve_contiguous_params(ctlr, id, count, slot);
+ return reserve_contiguous_slots(ctlr, id, count, slot);
default:
return -EINVAL;
}
@@ -799,21 +808,21 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
EXPORT_SYMBOL(edma_alloc_cont_slots);
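
An illustrative caller of the renamed slot API (hypothetical, not part of the patch) that reserves four contiguous PaRAM slots anywhere on controller 0 and releases them again:

static int example_alloc_free_slots(void)
{
	int slot;

	slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
	if (slot < 0)
		return slot;

	/* ... program the four PaRAM slots starting at slot here ... */

	edma_free_cont_slots(slot, 4);
	return 0;
}
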
/**
- * edma_free_cont_slots - deallocate DMA parameter RAMs
- * @slot: first parameter RAM of a set of parameter RAMs to be freed
- * @count: the number of contiguous parameter RAMs to be freed
+ * edma_free_cont_slots - deallocate DMA parameter RAM slots
+ * @slot: first parameter RAM slot of the set of contiguous slots to be freed
+ * @count: the number of contiguous parameter RAM slots to be freed
*
* This deallocates the parameter RAM slots allocated by
* edma_alloc_cont_slots.
* Callers/applications need to keep track of sets of contiguous
- * parameter RAMs that have been allocated using the edma_alloc_cont_slots
+ * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
* API.
* Callers are responsible for ensuring the slots are inactive, and will
* not be activated.
*/
int edma_free_cont_slots(unsigned slot, int count)
{
- unsigned ctlr;
+ unsigned ctlr, slot_to_free;
int i;
ctlr = EDMA_CTLR(slot);
@@ -826,11 +835,11 @@ int edma_free_cont_slots(unsigned slot, int count)
for (i = slot; i < slot + count; ++i) {
ctlr = EDMA_CTLR(i);
- slot = EDMA_CHAN_SLOT(i);
+ slot_to_free = EDMA_CHAN_SLOT(i);
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
+ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
&dummy_paramset, PARM_SIZE);
- clear_bit(slot, edma_info[ctlr]->edma_inuse);
+ clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
}
return 0;
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index f6ea9db11f4..744755b5323 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -12,23 +12,14 @@
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/bitops.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
#include <mach/gpio.h>
#include <asm/mach/irq.h>
-
static DEFINE_SPINLOCK(gpio_lock);
struct davinci_gpio {
diff --git a/arch/arm/mach-davinci/include/mach/asp.h b/arch/arm/mach-davinci/include/mach/asp.h
index e07f70ed7c5..834725f1e81 100644
--- a/arch/arm/mach-davinci/include/mach/asp.h
+++ b/arch/arm/mach-davinci/include/mach/asp.h
@@ -11,6 +11,9 @@
#define DAVINCI_ASP0_BASE 0x01E02000
#define DAVINCI_ASP1_BASE 0x01E04000
+/* Bases of dm365 register banks */
+#define DAVINCI_DM365_ASP0_BASE 0x01D02000
+
/* Bases of dm646x register banks */
#define DAVINCI_DM646X_MCASP0_REG_BASE 0x01D01000
#define DAVINCI_DM646X_MCASP1_REG_BASE 0x01D01800
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 1fd3917cae4..6ca2c9a0a48 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -20,12 +20,6 @@ extern void davinci_irq_init(void);
extern void __iomem *davinci_intc_base;
extern int davinci_intc_type;
-/* parameters describe VBUS sourcing for host mode */
-extern void setup_usb(unsigned mA, unsigned potpgt_msec);
-
-/* parameters describe VBUS sourcing for host mode */
-extern void setup_usb(unsigned mA, unsigned potpgt_msec);
-
struct davinci_timer_instance {
void __iomem *base;
u32 bottom_irq;
diff --git a/arch/arm/mach-davinci/include/mach/cpufreq.h b/arch/arm/mach-davinci/include/mach/cpufreq.h
new file mode 100644
index 00000000000..3c089cfb6cd
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/cpufreq.h
@@ -0,0 +1,26 @@
+/*
+ * TI DaVinci CPUFreq platform support.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MACH_DAVINCI_CPUFREQ_H
+#define _MACH_DAVINCI_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+
+struct davinci_cpufreq_config {
+ struct cpufreq_frequency_table *freq_table;
+ int (*set_voltage) (unsigned int index);
+ int (*init) (void);
+};
+
+#endif
diff --git a/arch/arm/mach-davinci/include/mach/cpuidle.h b/arch/arm/mach-davinci/include/mach/cpuidle.h
new file mode 100644
index 00000000000..cbfc6a9c81b
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/cpuidle.h
@@ -0,0 +1,17 @@
+/*
+ * TI DaVinci cpuidle platform support
+ *
+ * 2009 (C) Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _MACH_DAVINCI_CPUIDLE_H
+#define _MACH_DAVINCI_CPUIDLE_H
+
+struct davinci_cpuidle_config {
+ u32 ddr2_pdown;
+};
+
+#endif
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index d4095d0572c..90704910d34 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -11,12 +11,17 @@
#ifndef __ASM_ARCH_DAVINCI_DA8XX_H
#define __ASM_ARCH_DAVINCI_DA8XX_H
+#include <video/da8xx-fb.h>
+
#include <mach/serial.h>
#include <mach/edma.h>
#include <mach/i2c.h>
#include <mach/emac.h>
#include <mach/asp.h>
#include <mach/mmc.h>
+#include <mach/usb.h>
+
+extern void __iomem *da8xx_syscfg_base;
/*
* The cp_intc interrupt controller for the da8xx isn't in the same
@@ -29,11 +34,15 @@
#define DA8XX_CP_INTC_SIZE SZ_8K
#define DA8XX_CP_INTC_VIRT (IO_VIRT - DA8XX_CP_INTC_SIZE - SZ_4K)
-#define DA8XX_BOOT_CFG_BASE (IO_PHYS + 0x14000)
+#define DA8XX_SYSCFG_BASE (IO_PHYS + 0x14000)
+#define DA8XX_SYSCFG_VIRT(x) (da8xx_syscfg_base + (x))
+#define DA8XX_JTAG_ID_REG 0x18
+#define DA8XX_CFGCHIP0_REG 0x17c
+#define DA8XX_CFGCHIP2_REG 0x184
+#define DA8XX_CFGCHIP3_REG 0x188
#define DA8XX_PSC0_BASE 0x01c10000
#define DA8XX_PLL0_BASE 0x01c11000
-#define DA8XX_JTAG_ID_REG 0x01c14018
#define DA8XX_TIMER64P0_BASE 0x01c20000
#define DA8XX_TIMER64P1_BASE 0x01c21000
#define DA8XX_GPIO_BASE 0x01e26000
@@ -43,6 +52,7 @@
#define DA8XX_AEMIF_CS2_BASE 0x60000000
#define DA8XX_AEMIF_CS3_BASE 0x62000000
#define DA8XX_AEMIF_CTL_BASE 0x68000000
+#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define PINMUX0 0x00
#define PINMUX1 0x04
@@ -71,13 +81,20 @@ void __init da850_init(void);
int da8xx_register_edma(void);
int da8xx_register_i2c(int instance, struct davinci_i2c_platform_data *pdata);
int da8xx_register_watchdog(void);
+int da8xx_register_usb20(unsigned mA, unsigned potpgt);
+int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
int da8xx_register_emac(void);
-int da8xx_register_lcdc(void);
+int da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata);
int da8xx_register_mmcsd0(struct davinci_mmc_config *config);
-void __init da8xx_init_mcasp(int id, struct snd_platform_data *pdata);
+void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata);
+int da8xx_register_rtc(void);
+int da850_register_cpufreq(void);
+int da8xx_register_cpuidle(void);
extern struct platform_device da8xx_serial_device;
extern struct emac_platform_data da8xx_emac_pdata;
+extern struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata;
+extern struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata;
extern const short da830_emif25_pins[];
extern const short da830_spi0_pins[];
@@ -110,6 +127,7 @@ extern const short da850_uart2_pins[];
extern const short da850_i2c0_pins[];
extern const short da850_i2c1_pins[];
extern const short da850_cpgmac_pins[];
+extern const short da850_rmii_pins[];
extern const short da850_mcasp_pins[];
extern const short da850_lcdcntl_pins[];
extern const short da850_mmcsd0_pins[];
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
index 09db4343bb4..f1710a30e7b 100644
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ b/arch/arm/mach-davinci/include/mach/dm365.h
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <mach/emac.h>
+#include <mach/asp.h>
+#include <mach/keyscan.h>
#define DM365_EMAC_BASE (0x01D07000)
#define DM365_EMAC_CNTRL_OFFSET (0x0000)
@@ -24,6 +26,14 @@
#define DM365_EMAC_MDIO_OFFSET (0x4000)
#define DM365_EMAC_CNTRL_RAM_SIZE (0x2000)
+/* Base of key scan register bank */
+#define DM365_KEYSCAN_BASE (0x01C69400)
+
+#define DM365_RTC_BASE (0x01C69000)
+
void __init dm365_init(void);
+void __init dm365_init_asp(struct snd_platform_data *pdata);
+void __init dm365_init_ks(struct davinci_ks_platform_data *pdata);
+void __init dm365_init_rtc(void);
#endif /* __ASM_ARCH_DM365_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 0efb73852c2..44e8f0fae9e 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -22,7 +22,6 @@
#ifndef __ASM_ARCH_DM644X_H
#define __ASM_ARCH_DM644X_H
-#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <mach/emac.h>
#include <mach/asp.h>
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/include/mach/irqs.h
index 3c918a77261..354af71798d 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/include/mach/irqs.h
@@ -217,6 +217,7 @@
#define IRQ_DM365_SDIOINT0 23
#define IRQ_DM365_MMCINT1 27
#define IRQ_DM365_PWMINT3 28
+#define IRQ_DM365_RTCINT 29
#define IRQ_DM365_SDIOINT1 31
#define IRQ_DM365_SPIINT0_0 42
#define IRQ_DM365_SPIINT3_0 43
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index bb84893a4e8..b60c693985f 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -40,6 +40,11 @@ enum davinci_dm644x_index {
/* AEAW functions */
DM644X_AEAW,
+ DM644X_AEAW0,
+ DM644X_AEAW1,
+ DM644X_AEAW2,
+ DM644X_AEAW3,
+ DM644X_AEAW4,
/* Memory Stick */
DM644X_MSTK,
@@ -237,8 +242,8 @@ enum davinci_dm365_index {
DM365_EMAC_MDIO,
DM365_EMAC_MDCLK,
- /* Keypad */
- DM365_KEYPAD,
+ /* Key Scan */
+ DM365_KEYSCAN,
/* PWM */
DM365_PWM0,
@@ -774,6 +779,14 @@ enum davinci_da850_index {
DA850_MII_RXD_0,
DA850_MDIO_CLK,
DA850_MDIO_D,
+ DA850_RMII_TXD_0,
+ DA850_RMII_TXD_1,
+ DA850_RMII_TXEN,
+ DA850_RMII_CRS_DV,
+ DA850_RMII_RXD_0,
+ DA850_RMII_RXD_1,
+ DA850_RMII_RXER,
+ DA850_RMII_MHZ_50_CLK,
/* McASP function */
DA850_ACLKR,
@@ -881,8 +894,9 @@ enum davinci_da850_index {
DA850_NEMA_CS_2,
/* GPIO function */
+ DA850_GPIO2_6,
+ DA850_GPIO2_8,
DA850_GPIO2_15,
- DA850_GPIO8_10,
DA850_GPIO4_0,
DA850_GPIO4_1,
};
diff --git a/arch/arm/mach-davinci/include/mach/system.h b/arch/arm/mach-davinci/include/mach/system.h
index 8e4f10fe126..5a7d7581b8c 100644
--- a/arch/arm/mach-davinci/include/mach/system.h
+++ b/arch/arm/mach-davinci/include/mach/system.h
@@ -11,9 +11,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-
extern void davinci_watchdog_reset(void);
static inline void arch_idle(void)
diff --git a/arch/arm/mach-davinci/include/mach/usb.h b/arch/arm/mach-davinci/include/mach/usb.h
new file mode 100644
index 00000000000..e0bc4abe69c
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/usb.h
@@ -0,0 +1,59 @@
+/*
+ * USB related definitions
+ *
+ * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_USB_H
+#define __ASM_ARCH_USB_H
+
+/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */
+#define CFGCHIP2_PHYCLKGD (1 << 17)
+#define CFGCHIP2_VBUSSENSE (1 << 16)
+#define CFGCHIP2_RESET (1 << 15)
+#define CFGCHIP2_OTGMODE (3 << 13)
+#define CFGCHIP2_NO_OVERRIDE (0 << 13)
+#define CFGCHIP2_FORCE_HOST (1 << 13)
+#define CFGCHIP2_FORCE_DEVICE (2 << 13)
+#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13)
+#define CFGCHIP2_USB1PHYCLKMUX (1 << 12)
+#define CFGCHIP2_USB2PHYCLKMUX (1 << 11)
+#define CFGCHIP2_PHYPWRDN (1 << 10)
+#define CFGCHIP2_OTGPWRDN (1 << 9)
+#define CFGCHIP2_DATPOL (1 << 8)
+#define CFGCHIP2_USB1SUSPENDM (1 << 7)
+#define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */
+#define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */
+#define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus comparator */
+#define CFGCHIP2_REFFREQ (0xf << 0)
+#define CFGCHIP2_REFFREQ_12MHZ (1 << 0)
+#define CFGCHIP2_REFFREQ_24MHZ (2 << 0)
+#define CFGCHIP2_REFFREQ_48MHZ (3 << 0)
+
+struct da8xx_ohci_root_hub;
+
+typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
+ unsigned port);
+
+/* Passed as the platform data to the OHCI driver */
+struct da8xx_ohci_root_hub {
+ /* Switch the port power on/off */
+ int (*set_power)(unsigned port, int on);
+ /* Read the port power status */
+ int (*get_power)(unsigned port);
+ /* Read the port over-current indicator */
+ int (*get_oci)(unsigned port);
+ /* Over-current indicator change notification (pass NULL to disable) */
+ int (*ocic_notify)(da8xx_ocic_handler_t handler);
+
+ /* Time from power on to power good (in 2 ms units) */
+ u8 potpgt;
+};
+
+void davinci_setup_usb(unsigned mA, unsigned potpgt_ms);
+
+#endif /* ifndef __ASM_ARCH_USB_H */
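
A board supplies the root hub hooks through da8xx_register_usb11(). A minimal hypothetical sketch; the GPIO name, helper names and the 3 ms power-good delay are assumptions, not part of the patch:

/* sketch only: needs <linux/gpio.h>; EVM_USB_DRV_GPIO is an assumed GPIO */
static int evm_usb11_set_power(unsigned port, int on)
{
	gpio_set_value(EVM_USB_DRV_GPIO, on);
	return 0;
}

static struct da8xx_ohci_root_hub evm_usb11_pdata = {
	.set_power	= evm_usb11_set_power,
	.potpgt		= (3 + 1) / 2,	/* 3 ms power-on to power-good, in 2 ms units */
};

/* from board init: da8xx_register_usb11(&evm_usb11_pdata); */
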
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index 898905e4894..f757e83415f 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <mach/hardware.h>
#include <mach/mux.h>
#include <mach/common.h>
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index a78b657e916..04a3cb72c5a 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -19,14 +19,11 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/cputype.h>
-#include <mach/hardware.h>
#include <mach/psc.h>
-#include <mach/mux.h>
/* PSC register offsets */
#define EPCPR 0x070
diff --git a/arch/arm/mach-davinci/serial.c b/arch/arm/mach-davinci/serial.c
index c530c7333d0..7ce5ba08657 100644
--- a/arch/arm/mach-davinci/serial.c
+++ b/arch/arm/mach-davinci/serial.c
@@ -28,14 +28,8 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/irq.h>
-#include <mach/hardware.h>
#include <mach/serial.h>
-#include <mach/irqs.h>
#include <mach/cputype.h>
-#include <mach/common.h>
-
-#include "clock.h"
static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
int offset)
diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c
index 4f1fc9b318b..db0f7787faf 100644
--- a/arch/arm/mach-davinci/sram.c
+++ b/arch/arm/mach-davinci/sram.c
@@ -9,15 +9,12 @@
* (at your option) any later version.
*/
#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <mach/common.h>
-#include <mach/memory.h>
#include <mach/sram.h>
-
static struct gen_pool *sram_pool;
void *sram_alloc(size_t len, dma_addr_t *dma)
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 0d1b6d407b4..42d985beece 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -14,20 +14,14 @@
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/device.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
-#include <asm/system.h>
-#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-#include <asm/errno.h>
-#include <mach/io.h>
#include <mach/cputype.h>
#include <mach/time.h>
#include "clock.h"
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 06f55931620..31f0cbea0ca 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -1,21 +1,21 @@
/*
* USB
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/musb.h>
-#include <linux/usb/otg.h>
#include <mach/common.h>
-#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
+#include <mach/usb.h>
-#define DAVINCI_USB_OTG_BASE 0x01C64000
+#define DAVINCI_USB_OTG_BASE 0x01c64000
+
+#define DA8XX_USB0_BASE 0x01e00000
+#define DA8XX_USB1_BASE 0x01e25000
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
static struct musb_hdrc_eps_bits musb_eps[] = {
@@ -85,10 +85,10 @@ static struct platform_device usb_dev = {
.num_resources = ARRAY_SIZE(usb_resources),
};
-void __init setup_usb(unsigned mA, unsigned potpgt_msec)
+void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms)
{
- usb_data.power = mA / 2;
- usb_data.potpgt = potpgt_msec / 2;
+ usb_data.power = mA > 510 ? 255 : mA / 2;
+ usb_data.potpgt = (potpgt_ms + 1) / 2;
if (cpu_is_davinci_dm646x()) {
/* Override the defaults as DM6467 uses different IRQs. */
@@ -100,11 +100,77 @@ void __init setup_usb(unsigned mA, unsigned potpgt_msec)
platform_device_register(&usb_dev);
}
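
Board code calls the renamed helper with the VBUS budget in mA and the power-good delay in ms; a hypothetical call with illustrative values:

	/* 500 mA VBUS budget, 8 ms power-on to power-good */
	davinci_setup_usb(500, 8);
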
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+static struct resource da8xx_usb20_resources[] = {
+ {
+ .start = DA8XX_USB0_BASE,
+ .end = DA8XX_USB0_BASE + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_DA8XX_USB_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
+{
+ usb_data.clock = "usb20";
+ usb_data.power = mA > 510 ? 255 : mA / 2;
+ usb_data.potpgt = (potpgt + 1) / 2;
+
+ usb_dev.resource = da8xx_usb20_resources;
+ usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources);
+
+ return platform_device_register(&usb_dev);
+}
+#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
+
#else
-void __init setup_usb(unsigned mA, unsigned potpgt_msec)
+void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms)
{
}
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
+{
+ return 0;
+}
+#endif
+
#endif /* CONFIG_USB_MUSB_HDRC */
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+static struct resource da8xx_usb11_resources[] = {
+ [0] = {
+ .start = DA8XX_USB1_BASE,
+ .end = DA8XX_USB1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_DA8XX_IRQN,
+ .end = IRQ_DA8XX_IRQN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 da8xx_usb11_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device da8xx_usb11_device = {
+ .name = "ohci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &da8xx_usb11_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(da8xx_usb11_resources),
+ .resource = da8xx_usb11_resources,
+};
+
+int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata)
+{
+ da8xx_usb11_device.dev.platform_data = pdata;
+ return platform_device_register(&da8xx_usb11_device);
+}
+#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 5cc7e2e9e41..45876427eb2 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -215,22 +215,18 @@ copy_thread(unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char *filename;
struct pt_regs *regs = (struct pt_regs *)((&name) + 6);
- lock_kernel();
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
- goto out;
+ return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
- out:
- unlock_kernel();
return error;
}
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 90425593821..21d0fd19276 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -255,15 +255,12 @@ asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __
int error;
char * filename;
- lock_kernel();
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
- goto out;
+ return error;
error = do_execve(filename, argv, envp, __frame);
putname(filename);
- out:
- unlock_kernel();
return error;
}
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index e2f33d0f996..bd883faa983 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -218,15 +218,12 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp,int dummy,...)
char * filename;
struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
- lock_kernel();
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
- goto out;
+ return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
-out:
- unlock_kernel();
return error;
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4990495d753..a35c661e5e8 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
{
unsigned long flags;
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
- fsyscall_gtod_data.clk_mult = c->mult;
+ fsyscall_gtod_data.clk_mult = mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 8f8f4abab2f..5c9ecd42709 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -352,15 +352,12 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp)
char * filename;
struct pt_regs *regs = (struct pt_regs *) &name;
- lock_kernel();
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
- goto out;
+ return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
-out:
- unlock_kernel();
return error;
}
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index df6a430de5e..c7f1bfef157 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -84,8 +84,16 @@ static inline int init_mips_clocksource(void)
#endif
}
-extern void clocksource_set_clock(struct clocksource *cs, unsigned int clock);
-extern void clockevent_set_clock(struct clock_event_device *cd,
- unsigned int clock);
+static inline void clocksource_set_clock(struct clocksource *cs,
+ unsigned int clock)
+{
+ clocksource_calc_mult_shift(cs, clock, 4);
+}
+
+static inline void clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ clockevents_calc_mult_shift(cd, clock, 4);
+}
#endif /* _ASM_TIME_H */
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 1f467d53464..fb749740551 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -71,39 +71,6 @@ EXPORT_SYMBOL(perf_irq);
unsigned int mips_hpt_frequency;
-void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
-{
- u64 temp;
- u32 shift;
-
- /* Find a shift value */
- for (shift = 32; shift > 0; shift--) {
- temp = (u64) NSEC_PER_SEC << shift;
- do_div(temp, clock);
- if ((temp >> 32) == 0)
- break;
- }
- cs->shift = shift;
- cs->mult = (u32) temp;
-}
-
-void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
- unsigned int clock)
-{
- u64 temp;
- u32 shift;
-
- /* Find a shift value */
- for (shift = 32; shift > 0; shift--) {
- temp = (u64) clock << shift;
- do_div(temp, NSEC_PER_SEC);
- if ((temp >> 32) == 0)
- break;
- }
- cd->shift = shift;
- cd->mult = (u32) temp;
-}
-
/*
* This function exists in order to cause an error due to a duplicate
* definition if platform code should have its own implementation. The hook
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 892cce82867..ec8a21df114 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -275,16 +275,12 @@ asmlinkage long sys_execve(char __user *name,
char *filename;
int error;
- lock_kernel();
-
filename = getname(name);
error = PTR_ERR(filename);
- if (!IS_ERR(filename)) {
- error = do_execve(filename, argv, envp, __frame);
- putname(filename);
- }
-
- unlock_kernel();
+ if (IS_ERR(filename))
+ return error;
+ error = do_execve(filename, argv, envp, __frame);
+ putname(filename);
return error;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 36707dec94d..674800b242d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -834,7 +834,8 @@ static cycle_t timebase_read(struct clocksource *cs)
return (cycle_t)get_tb();
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
u64 t2x, stamp_xsec;
@@ -847,7 +848,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
- t2x = (u64) clock->mult * 4611686018ULL;
+ t2x = (u64) mult * 4611686018ULL;
stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
do_div(stamp_xsec, 1000000000);
stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
@@ -924,7 +925,7 @@ static void register_decrementer_clockevent(int cpu)
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
- printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+ printk(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
clockevents_register_device(dec);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 16c673096a2..c80235206c0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -220,23 +220,8 @@ config AUDIT_ARCH
bool
default y
-config S390_SWITCH_AMODE
- bool "Switch kernel/user addressing modes"
- help
- This option allows to switch the addressing modes of kernel and user
- space. The kernel parameter switch_amode=on will enable this feature,
- default is disabled. Enabling this (via kernel parameter) on machines
- earlier than IBM System z9-109 EC/BC will reduce system performance.
-
- Note that this option will also be selected by selecting the execute
- protection option below. Enabling the execute protection via the
- noexec kernel parameter will also switch the addressing modes,
- independent of the switch_amode kernel parameter.
-
-
config S390_EXEC_PROTECT
bool "Data execute protection"
- select S390_SWITCH_AMODE
help
This option allows to enable a buffer overflow protection for user
space programs and it also selects the addressing mode option above.
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b49c00ce65e..a3209906739 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -6,7 +6,6 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -49,7 +48,6 @@ static unsigned char parm_block[32] = {
static int prng_open(struct inode *inode, struct file *file)
{
- cycle_kernel_lock();
return nonseekable_open(inode, file);
}
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ab4464486b7..f4e53c6708d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_AUDIT_ARCH=y
-CONFIG_S390_SWITCH_AMODE=y
CONFIG_S390_EXEC_PROTECT=y
#
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index ae7c8f9f94a..2a113d6a7df 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -21,7 +21,7 @@
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \
- typeof(ptr->counter) old_val, new_val; \
+ int old_val, new_val; \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
@@ -38,7 +38,7 @@
#else /* __GNUC__ */
#define __CS_LOOP(ptr, op_val, op_string) ({ \
- typeof(ptr->counter) old_val, new_val; \
+ int old_val, new_val; \
asm volatile( \
" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
@@ -143,7 +143,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
- typeof(ptr->counter) old_val, new_val; \
+ long long old_val, new_val; \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
@@ -160,7 +160,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#else /* __GNUC__ */
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
- typeof(ptr->counter) old_val, new_val; \
+ long long old_val, new_val; \
asm volatile( \
" lg %0,0(%3)\n" \
"0: lgr %1,%0\n" \
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 2a541955117..f4bd346a52d 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -142,6 +142,8 @@ struct ccw1;
extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
extern int ccw_device_set_options(struct ccw_device *, unsigned long);
extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
+int ccw_device_is_pathgroup(struct ccw_device *cdev);
+int ccw_device_is_multipath(struct ccw_device *cdev);
/* Allow for i/o completion notification after primary interrupt status. */
#define CCWDEV_EARLY_NOTIFICATION 0x0001
@@ -151,6 +153,8 @@ extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
#define CCWDEV_DO_PATHGROUP 0x0004
/* Allow forced onlining of boxed devices. */
#define CCWDEV_ALLOW_FORCE 0x0008
+/* Try to use multipath mode. */
+#define CCWDEV_DO_MULTIPATH 0x0010
extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long);
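The new helpers report whether the options a driver requested actually took effect on the device. A hedged sketch of how a CCW driver might combine them with the new CCWDEV_DO_MULTIPATH flag; the function name and message are illustrative, not from this patch:

	#include <asm/ccwdev.h>

	static int example_ccw_set_options(struct ccw_device *cdev)
	{
		int rc;

		/* request path grouping plus multipath mode for this device */
		rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
		if (rc)
			return rc;
		/* the helpers tell us what the hardware actually supports */
		if (ccw_device_is_pathgroup(cdev) && !ccw_device_is_multipath(cdev))
			dev_info(&cdev->dev, "path grouping active, multipath not available\n");
		return 0;
	}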
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f23961ada7f..258ba88b7b5 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -183,6 +183,7 @@ struct s390_idle_data {
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
+ int nohz_delay;
};
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
@@ -198,4 +199,11 @@ static inline void s390_idle_check(void)
vtime_start_cpu();
}
+static inline int s390_nohz_delay(int cpu)
+{
+ return per_cpu(s390_idle, cpu).nohz_delay != 0;
+}
+
+#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
+
#endif /* _S390_CPUTIME_H */
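arch_needs_cpu() gives the architecture a veto over stopping the periodic tick: on s390 it reports whether an interrupt other than the timer was taken while idle (nohz_delay is set in s390_ext.c and cleared again in vtime.c further down). A simplified sketch of how the generic nohz code can honour the hook; the real tick code has many more conditions:

	static int can_stop_tick_sketch(int cpu)
	{
		/* s390: an I/O or non-timer external interrupt arrived while
		 * idle, so follow-up work is likely - keep the tick running */
		if (arch_needs_cpu(cpu))
			return 0;
		return 1;	/* the other checks of the real code are omitted */
	}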
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index fc7edd6f41b..976e273988c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 1;
mm->context.alloc_pgste = 1;
} else {
- mm->context.noexec = s390_noexec;
+ mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
mm->context.has_pgste = 0;
mm->context.alloc_pgste = 0;
}
@@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- if (switch_amode) {
+ if (user_mode != HOME_SPACE_MODE) {
/* Load primary space page table origin. */
pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index ddad5903341..68940d0bad9 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.crst_list);
INIT_LIST_HEAD(&mm->context.pgtable_list);
- return (pgd_t *) crst_table_alloc(mm, s390_noexec);
+ return (pgd_t *)
+ crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 60a7b1a1702..e2fa79cf061 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -169,12 +169,13 @@ extern unsigned long VMALLOC_START;
* STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
*
* A 64 bit pagetable entry of S390 has following format:
- * | PFRA |0IP0| OS |
+ * | PFRA |0IPC| OS |
* 0000000000111111111122222222223333333333444444444455555555556666
* 0123456789012345678901234567890123456789012345678901234567890123
*
* I Page-Invalid Bit: Page is not available for address-translation
* P Page-Protection Bit: Store access not possible for page
+ * C Change-bit override: HW is not required to set change bit
*
* A 64 bit segmenttable entry of S390 has following format:
* | P-table origin | TT
@@ -218,6 +219,7 @@ extern unsigned long VMALLOC_START;
*/
/* Hardware bits in the page table entry */
+#define _PAGE_CO 0x100 /* HW Change-bit override */
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
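The change-bit override tells the hardware it need not maintain the change (dirty) bit for a page; the vmem.c hunk near the end of this patch sets it for the kernel 1:1 mapping on machines with large-page support. A minimal sketch of ORing the bit into a kernel pte under the same condition:

	static pte_t mk_kernel_pte_sketch(unsigned long pfn)
	{
		pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* ordinary kernel mapping */

		if (MACHINE_HAS_HPAGE)
			pte_val(pte) |= _PAGE_CO;	/* HW need not track the change bit */
		return pte;
	}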
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e37478e8728..52a779c337e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -49,17 +49,12 @@ extern unsigned long memory_end;
void detect_memory_layout(struct mem_chunk chunk[]);
-#ifdef CONFIG_S390_SWITCH_AMODE
-extern unsigned int switch_amode;
-#else
-#define switch_amode (0)
-#endif
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern unsigned int s390_noexec;
-#else
-#define s390_noexec (0)
-#endif
+#define PRIMARY_SPACE_MODE 0
+#define ACCESS_REGISTER_MODE 1
+#define SECONDARY_SPACE_MODE 2
+#define HOME_SPACE_MODE 3
+
+extern unsigned int user_mode;
/*
* Machine features detected in head.S
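The two boolean globals are folded into a single user_mode variable; the rest of the patch replaces them consistently as sketched below. The helper names are illustrative only — the patch open-codes the comparisons:

	static inline int was_switch_amode(void)	/* illustrative name */
	{
		return user_mode != HOME_SPACE_MODE;	/* every switch_amode test becomes this */
	}

	static inline int was_s390_noexec(void)		/* illustrative name */
	{
		return user_mode == SECONDARY_SPACE_MODE; /* every s390_noexec test becomes this */
	}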
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a868b272c25..2ab1141eeb5 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,57 +1,22 @@
/*
- * include/asm-s390/smp.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- * Heiko Carstens (heiko.carstens@de.ibm.com)
+ * Copyright IBM Corp. 1999,2009
+ * Author(s): Denis Joseph Barrow,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
+#ifdef CONFIG_SMP
-#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
-
-#include <asm/lowcore.h>
-#include <asm/sigp.h>
-#include <asm/ptrace.h>
#include <asm/system.h>
-
-/*
- s390 specific smp.c headers
- */
-typedef struct
-{
- int intresting;
- sigp_ccode ccode;
- __u32 status;
- __u16 cpu;
-} sigp_info;
+#include <asm/sigp.h>
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
-
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
-#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
@@ -64,7 +29,9 @@ extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#endif
+extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
@@ -72,5 +39,4 @@ extern int smp_rescan_cpus(void);
static inline int smp_rescan_cpus(void) { return 0; }
#endif
-extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
-#endif
+#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/sockios.h b/arch/s390/include/asm/sockios.h
index f4fc16c7da5..6f60eee7324 100644
--- a/arch/s390/include/asm/sockios.h
+++ b/arch/s390/include/asm/sockios.h
@@ -1,21 +1,6 @@
-/*
- * include/asm-s390/sockios.h
- *
- * S390 version
- *
- * Derived from "include/asm-i386/sockios.h"
- */
+#ifndef _ASM_S390_SOCKIOS_H
+#define _ASM_S390_SOCKIOS_H
-#ifndef __ARCH_S390_SOCKIOS__
-#define __ARCH_S390_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+#include <asm-generic/sockios.h>
#endif
diff --git a/arch/s390/include/asm/termbits.h b/arch/s390/include/asm/termbits.h
index 58731853d52..71bf6ac6a2b 100644
--- a/arch/s390/include/asm/termbits.h
+++ b/arch/s390/include/asm/termbits.h
@@ -1,206 +1,6 @@
-/*
- * include/asm-s390/termbits.h
- *
- * S390 version
- *
- * Derived from "include/asm-i386/termbits.h"
- */
+#ifndef _ASM_S390_TERMBITS_H
+#define _ASM_S390_TERMBITS_H
-#ifndef __ARCH_S390_TERMBITS_H__
-#define __ARCH_S390_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
+#include <asm-generic/termbits.h>
#endif
diff --git a/arch/s390/include/asm/todclk.h b/arch/s390/include/asm/todclk.h
deleted file mode 100644
index c7f62055488..00000000000
--- a/arch/s390/include/asm/todclk.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * File...........: linux/include/asm/todclk.h
- * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
- * Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
- * History of changes (starts July 2000)
- */
-
-#ifndef __ASM_TODCLK_H
-#define __ASM_TODCLK_H
-
-#ifdef __KERNEL__
-
-#define TOD_uSEC (0x1000ULL)
-#define TOD_mSEC (1000 * TOD_uSEC)
-#define TOD_SEC (1000 * TOD_mSEC)
-#define TOD_MIN (60 * TOD_SEC)
-#define TOD_HOUR (60 * TOD_MIN)
-
-#endif
-
-#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8377e91533d..cbf0a8745bf 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -93,6 +93,8 @@ extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;
+extern int __handle_fault(unsigned long, unsigned long, int);
+
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
size = uaccess.copy_to_user_small(size, ptr, x);
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c7be8e10b87..683f6381cc5 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
# Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index fda1a8123f9..25c31d68140 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -31,14 +31,8 @@
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 45e9092b3aa..cb97afc85c9 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -4,10 +4,6 @@
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
-#include <linux/nfs_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/export.h>
/* Macro that masks the high order bit of an 32 bit pointer and converts it*/
/* to a 64 bit pointer */
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6a250808092..d984a2a380c 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -83,6 +83,8 @@ startup_continue:
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
+ llgfr %r13,%r13 # clear high-order half of base reg
+ lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
@@ -127,6 +129,7 @@ startup_continue:
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
+.Lzero64:.fill 16,4,0x0
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 0de305b598c..59618bcd99b 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -126,6 +126,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
/* Serve timer interrupts first. */
clock_comparator_work();
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+ if (code != 0x1004)
+ __get_cpu_var(s390_idle).nohz_delay = 1;
index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) {
if (likely(p->code == code))
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 061479ff029..0663287fa1b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
-#ifdef CONFIG_S390_SWITCH_AMODE
-unsigned int switch_amode = 0;
-EXPORT_SYMBOL_GPL(switch_amode);
+unsigned int user_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(user_mode);
static int set_amode_and_uaccess(unsigned long user_amode,
unsigned long user32_amode)
@@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode,
*/
static int __init early_parse_switch_amode(char *p)
{
- switch_amode = 1;
+ if (user_mode != SECONDARY_SPACE_MODE)
+ user_mode = PRIMARY_SPACE_MODE;
return 0;
}
early_param("switch_amode", early_parse_switch_amode);
-#else /* CONFIG_S390_SWITCH_AMODE */
-static inline int set_amode_and_uaccess(unsigned long user_amode,
- unsigned long user32_amode)
+static int __init early_parse_user_mode(char *p)
{
+ if (p && strcmp(p, "primary") == 0)
+ user_mode = PRIMARY_SPACE_MODE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+ else if (p && strcmp(p, "secondary") == 0)
+ user_mode = SECONDARY_SPACE_MODE;
+#endif
+ else if (!p || strcmp(p, "home") == 0)
+ user_mode = HOME_SPACE_MODE;
+ else
+ return 1;
return 0;
}
-#endif /* CONFIG_S390_SWITCH_AMODE */
+early_param("user_mode", early_parse_user_mode);
#ifdef CONFIG_S390_EXEC_PROTECT
-unsigned int s390_noexec = 0;
-EXPORT_SYMBOL_GPL(s390_noexec);
-
/*
* Enable execute protection?
*/
@@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p)
{
if (!strncmp(p, "off", 3))
return 0;
- switch_amode = 1;
- s390_noexec = 1;
+ user_mode = SECONDARY_SPACE_MODE;
return 0;
}
early_param("noexec", early_parse_noexec);
@@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec);
static void setup_addressing_mode(void)
{
- if (s390_noexec) {
+ if (user_mode == SECONDARY_SPACE_MODE) {
if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
PSW32_ASC_SECONDARY))
pr_info("Execute protection active, "
@@ -381,7 +385,7 @@ static void setup_addressing_mode(void)
else
pr_info("Execute protection active, "
"mvcos not available\n");
- } else if (switch_amode) {
+ } else if (user_mode == PRIMARY_SPACE_MODE) {
if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
pr_info("Address spaces switched, "
"mvcos available\n");
@@ -411,7 +415,7 @@ setup_lowcore(void)
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
- if (switch_amode)
+ if (user_mode != HOME_SPACE_MODE)
lc->restart_psw.mask |= PSW_ASC_HOME;
lc->external_new_psw.mask = psw_kernel_bits;
lc->external_new_psw.addr =
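Taken together, the parsers above accept the following command-line spellings; the older switch_amode= and noexec= parameters are still parsed for compatibility:

	user_mode=home		default; user space lives in the home address space
	user_mode=primary	user space in the primary space (what switch_amode=on used to select)
	user_mode=secondary	only with CONFIG_S390_EXEC_PROTECT; enables execute protection
	noexec=on		legacy spelling, same effect as user_mode=secondary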
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 34162a0b2ca..65065ac48ed 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,7 +214,8 @@ struct clocksource * __init clocksource_default_clock(void)
return &clocksource_tod;
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
if (clock != &clocksource_tod)
return;
@@ -334,7 +335,7 @@ int get_sync_clock(unsigned long long *clock)
sw0 = atomic_read(sw_ptr);
*clock = get_clock();
sw1 = atomic_read(sw_ptr);
- put_cpu_var(clock_sync_sync);
+ put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
@@ -384,7 +385,7 @@ static inline int check_sync_clock(void)
sw_ptr = &get_cpu_var(clock_sync_word);
rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
- put_cpu_var(clock_sync_sync);
+ put_cpu_var(clock_sync_word);
return rc;
}
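The two hunks above only fix the variable named in put_cpu_var(); as a reminder of the pairing, get_cpu_var()/put_cpu_var() bracket a preemption-disabled region and should always name the same per-cpu variable, as in the surrounding code:

	sw_ptr = &get_cpu_var(clock_sync_word);	/* disables preemption */
	sw0 = atomic_read(sw_ptr);
	/* ... read the TOD clock ... */
	put_cpu_var(clock_sync_word);		/* re-enables preemption; same variable as above */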
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index adfb32aa6d5..5f99e66c51c 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd)
unsigned int facility_list;
facility_list = stfl();
- vd->ectg_available = switch_amode && (facility_list & 1);
+ vd->ectg_available =
+ user_mode != HOME_SPACE_MODE && (facility_list & 1);
}
#ifdef CONFIG_64BIT
@@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
lowcore->vdso_per_cpu_data = __LC_PASTE;
- if (!switch_amode || !vdso_enabled)
+ if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
- if (!switch_amode || !vdso_enabled)
+ if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy)
static void vdso_init_cr5(void)
{
- if (switch_amode && vdso_enabled)
+ if (user_mode != HOME_SPACE_MODE && vdso_enabled)
on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c41bb0d416e..b59a812a010 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -167,6 +167,8 @@ void vtime_stop_cpu(void)
/* Wait for external, I/O or machine check interrupt. */
psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
+ idle->nohz_delay = 0;
+
/* Check if the CPU timer needs to be reprogrammed. */
if (vq->do_spt) {
__u64 vmax = VTIMER_MAX_SLICE;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index bf164fc2186..6ee55ae84ce 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
depends on HAVE_KVM && EXPERIMENTAL
select PREEMPT_NOTIFIERS
select ANON_INODES
- select S390_SWITCH_AMODE
---help---
Support hosting paravirtualized guest machines using the SIE
virtualization capability on the mainframe. This should work
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58da3f46121..60455f104ea 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
return size;
}
-#ifdef CONFIG_S390_SWITCH_AMODE
static size_t strnlen_user_mvcos(size_t count, const char __user *src)
{
char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
} while ((len_str == len) && (done < count));
return done;
}
-#endif /* CONFIG_S390_SWITCH_AMODE */
struct uaccess_ops uaccess_mvcos = {
.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};
-#ifdef CONFIG_S390_SWITCH_AMODE
struct uaccess_ops uaccess_mvcos_switch = {
.copy_from_user = copy_from_user_mvcos,
.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
.futex_atomic_op = futex_atomic_op_pt,
.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};
-#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index cb5d59eab0e..404f2de296d 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return NULL;
+ return (pte_t *) 0x3a;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return NULL;
+ return (pte_t *) 0x3b;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return NULL;
+ return (pte_t *) 0x10;
return pte_offset_map(pmd, addr);
}
-static int __handle_fault(struct mm_struct *mm, unsigned long address,
- int write_access)
-{
- struct vm_area_struct *vma;
- int ret = -EFAULT;
- int fault;
-
- if (in_atomic())
- return ret;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- if (unlikely(!vma))
- goto out;
- if (unlikely(vma->vm_start > address)) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto out;
- if (expand_stack(vma, address))
- goto out;
- }
-
- if (!write_access) {
- /* page not present, check vm flags */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto out;
- } else {
- if (!(vma->vm_flags & VM_WRITE))
- goto out;
- }
-
-survive:
- fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto out_sigbus;
- BUG();
- }
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
- ret = 0;
-out:
- up_read(&mm->mmap_sem);
- return ret;
-
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", current->comm);
- return ret;
-
-out_sigbus:
- up_read(&mm->mmap_sem);
- current->thread.prot_addr = address;
- current->thread.trap_no = 0x11;
- force_sig(SIGBUS, current);
- return ret;
-}
-
-static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
- size_t n, int write_user)
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+ size_t n, int write_user)
{
struct mm_struct *mm = current->mm;
unsigned long offset, pfn, done, size;
@@ -114,12 +49,17 @@ retry:
spin_lock(&mm->page_table_lock);
do {
pte = follow_table(mm, uaddr);
- if (!pte || !pte_present(*pte) ||
- (write_user && !pte_write(*pte)))
+ if ((unsigned long) pte < 0x1000)
goto fault;
+ if (!pte_present(*pte)) {
+ pte = (pte_t *) 0x11;
+ goto fault;
+ } else if (write_user && !pte_write(*pte)) {
+ pte = (pte_t *) 0x04;
+ goto fault;
+ }
pfn = pte_pfn(*pte);
-
offset = uaddr & (PAGE_SIZE - 1);
size = min(n - done, PAGE_SIZE - offset);
if (write_user) {
@@ -137,7 +77,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(mm, uaddr, write_user))
+ if (__handle_fault(uaddr, (unsigned long) pte, write_user))
return n - done;
goto retry;
}
@@ -146,30 +86,31 @@ fault:
* Do DAT for user address by page table walk, return kernel address.
* This function needs to be called with current->mm->page_table_lock held.
*/
-static unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
struct mm_struct *mm = current->mm;
- unsigned long pfn, ret;
+ unsigned long pfn;
pte_t *pte;
int rc;
- ret = 0;
retry:
pte = follow_table(mm, uaddr);
- if (!pte || !pte_present(*pte))
+ if ((unsigned long) pte < 0x1000)
goto fault;
+ if (!pte_present(*pte)) {
+ pte = (pte_t *) 0x11;
+ goto fault;
+ }
pfn = pte_pfn(*pte);
- ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-out:
- return ret;
+ return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(mm, uaddr, 0);
+ rc = __handle_fault(uaddr, (unsigned long) pte, 0);
spin_lock(&mm->page_table_lock);
- if (rc)
- goto out;
- goto retry;
+ if (!rc)
+ goto retry;
+ return 0;
}
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
@@ -234,8 +175,12 @@ retry:
spin_lock(&mm->page_table_lock);
do {
pte = follow_table(mm, uaddr);
- if (!pte || !pte_present(*pte))
+ if ((unsigned long) pte < 0x1000)
+ goto fault;
+ if (!pte_present(*pte)) {
+ pte = (pte_t *) 0x11;
goto fault;
+ }
pfn = pte_pfn(*pte);
offset = uaddr & (PAGE_SIZE-1);
@@ -249,9 +194,8 @@ retry:
return done + 1;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(mm, uaddr, 0)) {
+ if (__handle_fault(uaddr, (unsigned long) pte, 0))
return 0;
- }
goto retry;
}
@@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
{
struct mm_struct *mm = current->mm;
unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
- uaddr, done, size;
+ uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to;
pte_t *pte_from, *pte_to;
@@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
retry:
spin_lock(&mm->page_table_lock);
do {
+ write_user = 0;
+ uaddr = uaddr_from;
pte_from = follow_table(mm, uaddr_from);
- if (!pte_from || !pte_present(*pte_from)) {
- uaddr = uaddr_from;
- write_user = 0;
+ error_code = (unsigned long) pte_from;
+ if (error_code < 0x1000)
+ goto fault;
+ if (!pte_present(*pte_from)) {
+ error_code = 0x11;
goto fault;
}
+ write_user = 1;
+ uaddr = uaddr_to;
pte_to = follow_table(mm, uaddr_to);
- if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
- uaddr = uaddr_to;
- write_user = 1;
+ error_code = (unsigned long) pte_to;
+ if (error_code < 0x1000)
+ goto fault;
+ if (!pte_present(*pte_to)) {
+ error_code = 0x11;
+ goto fault;
+ } else if (!pte_write(*pte_to)) {
+ error_code = 0x04;
goto fault;
}
@@ -329,7 +284,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(mm, uaddr, write_user))
+ if (__handle_fault(uaddr, error_code, write_user))
return n - done;
goto retry;
}
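After this change follow_table() no longer returns NULL on failure: it returns a small integer cast to a pte pointer, and any value below 0x1000 is the translation-exception code (0x04 write protection, 0x10/0x11 segment/page translation, 0x3a/0x3b region translation) to hand to __handle_fault(). A condensed sketch of the calling convention the hunks above follow:

	pte = follow_table(mm, uaddr);
	if ((unsigned long) pte < 0x1000) {
		/* not a pointer: pseudo error code for __handle_fault() */
		spin_unlock(&mm->page_table_lock);
		if (__handle_fault(uaddr, (unsigned long) pte, write_user))
			return n - done;	/* fault could not be resolved */
		goto retry;			/* fault handled, walk the tables again */
	}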
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ff58779bf7e..76a3637b88e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -18,6 +18,7 @@
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
+#include <linux/suspend.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
@@ -44,6 +45,7 @@ static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
+static int cmm_suspended;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
@@ -147,9 +149,9 @@ cmm_thread(void *dummy)
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
- (cmm_pages != cmm_pages_target ||
- cmm_timed_pages != cmm_timed_pages_target ||
- kthread_should_stop()));
+ (!cmm_suspended && (cmm_pages != cmm_pages_target ||
+ cmm_timed_pages != cmm_timed_pages_target)) ||
+ kthread_should_stop());
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
@@ -410,6 +412,38 @@ cmm_smsg_target(char *from, char *msg)
static struct ctl_table_header *cmm_sysctl_header;
+static int cmm_suspend(void)
+{
+ cmm_suspended = 1;
+ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
+ return 0;
+}
+
+static int cmm_resume(void)
+{
+ cmm_suspended = 0;
+ cmm_kick_thread();
+ return 0;
+}
+
+static int cmm_power_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case PM_POST_HIBERNATION:
+ return cmm_resume();
+ case PM_HIBERNATION_PREPARE:
+ return cmm_suspend();
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block cmm_power_notifier = {
+ .notifier_call = cmm_power_event,
+};
+
static int
cmm_init (void)
{
@@ -418,7 +452,7 @@ cmm_init (void)
#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
if (!cmm_sysctl_header)
- goto out;
+ goto out_sysctl;
#endif
#ifdef CONFIG_CMM_IUCV
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -428,17 +462,21 @@ cmm_init (void)
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
+ rc = register_pm_notifier(&cmm_power_notifier);
+ if (rc)
+ goto out_pm;
init_waitqueue_head(&cmm_thread_wait);
init_timer(&cmm_timer);
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (!rc)
- goto out;
- /*
- * kthread_create failed. undo all the stuff from above again.
- */
- unregister_oom_notifier(&cmm_oom_nb);
+ if (rc)
+ goto out_kthread;
+ return 0;
+out_kthread:
+ unregister_pm_notifier(&cmm_power_notifier);
+out_pm:
+ unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
@@ -446,8 +484,8 @@ out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
+out_sysctl:
#endif
-out:
return rc;
}
@@ -455,6 +493,7 @@ static void
cmm_exit(void)
{
kthread_stop(cmm_thread_ptr);
+ unregister_pm_notifier(&cmm_power_notifier);
unregister_oom_notifier(&cmm_oom_nb);
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
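The hooks above follow the standard PM-notifier pattern: register during init, unregister on the error path and in exit. A stripped-down version of the same pattern for reference; all names here are illustrative:

	#include <linux/suspend.h>
	#include <linux/notifier.h>

	static int example_pm_event(struct notifier_block *nb, unsigned long event, void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:	/* quiesce, give borrowed resources back */
			return NOTIFY_OK;
		case PM_POST_HIBERNATION:	/* resume normal operation */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block example_pm_nb = {
		.notifier_call = example_pm_event,
	};
	/* register_pm_notifier(&example_pm_nb) during init,
	 * unregister_pm_notifier(&example_pm_nb) on failure and in the exit path */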
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6d507462967..fc102e70d9c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,16 +34,15 @@
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
+#include <asm/compat.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
-#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
-#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
@@ -52,11 +51,15 @@
extern int sysctl_userprocess_debug;
#endif
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#define VM_FAULT_BADCONTEXT 0x010000
+#define VM_FAULT_BADMAP 0x020000
+#define VM_FAULT_BADACCESS 0x040000
+
+static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
+#ifdef CONFIG_KPROBES
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
@@ -64,15 +67,9 @@ static inline int notify_page_fault(struct pt_regs *regs, long err)
ret = 1;
preempt_enable();
}
-
+#endif
return ret;
}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, long err)
-{
- return 0;
-}
-#endif
/*
@@ -100,57 +97,50 @@ void bust_spinlocks(int yes)
/*
* Returns the address space associated with the fault.
- * Returns 0 for kernel space, 1 for user space and
- * 2 for code execution in user space with noexec=on.
+ * Returns 0 for kernel space and 1 for user space.
*/
-static inline int check_space(struct task_struct *tsk)
+static inline int user_space_fault(unsigned long trans_exc_code)
{
/*
- * The lowest two bits of S390_lowcore.trans_exc_code
- * indicate which paging table was used.
+ * The lowest two bits of the translation exception
+ * identification indicate which paging table was used.
*/
- int desc = S390_lowcore.trans_exc_code & 3;
-
- if (desc == 3) /* Home Segment Table Descriptor */
- return switch_amode == 0;
- if (desc == 2) /* Secondary Segment Table Descriptor */
- return tsk->thread.mm_segment.ar4;
-#ifdef CONFIG_S390_SWITCH_AMODE
- if (unlikely(desc == 1)) { /* STD determined via access register */
- /* %a0 always indicates primary space. */
- if (S390_lowcore.exc_access_id != 0) {
- save_access_regs(tsk->thread.acrs);
- /*
- * An alet of 0 indicates primary space.
- * An alet of 1 indicates secondary space.
- * Any other alet values generate an
- * alen-translation exception.
- */
- if (tsk->thread.acrs[S390_lowcore.exc_access_id])
- return tsk->thread.mm_segment.ar4;
- }
- }
-#endif
- /* Primary Segment Table Descriptor */
- return switch_amode << s390_noexec;
+ trans_exc_code &= 3;
+ if (trans_exc_code == 2)
+ /* Access via secondary space, set_fs setting decides */
+ return current->thread.mm_segment.ar4;
+ if (user_mode == HOME_SPACE_MODE)
+ /* User space if the access has been done via home space. */
+ return trans_exc_code == 3;
+ /*
+ * If the user space is not the home space the kernel runs in home
+ * space. Access via secondary space has already been covered,
+ * access via primary space or access register is from user space
+ * and access via home space is from the kernel.
+ */
+ return trans_exc_code != 3;
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
-static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
- int si_code, unsigned long address)
+static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
+ int si_code, unsigned long trans_exc_code)
{
struct siginfo si;
+ unsigned long address;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+ current->thread.prot_addr = address;
+ current->thread.trap_no = int_code;
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
#if defined(CONFIG_SYSCTL)
if (sysctl_userprocess_debug)
#endif
{
printk("User process fault: interruption code 0x%lX\n",
- error_code);
+ int_code);
printk("failing address: %lX\n", address);
show_regs(regs);
}
@@ -161,13 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
force_sig_info(SIGSEGV, &si, current);
}
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static noinline void do_no_context(struct pt_regs *regs, long int_code,
+ unsigned long trans_exc_code)
{
const struct exception_table_entry *fixup;
+ unsigned long address;
/* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
return;
@@ -177,129 +168,149 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (check_space(current) == 0)
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+ if (!user_space_fault(trans_exc_code))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
- die("Oops", regs, error_code);
+ die("Oops", regs, int_code);
do_exit(SIGKILL);
}
-static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+static noinline void do_low_address(struct pt_regs *regs, long int_code,
+ unsigned long trans_exc_code)
{
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
- die ("Low-address protection", regs, error_code);
+ die ("Low-address protection", regs, int_code);
do_exit(SIGKILL);
}
- do_no_context(regs, error_code, 0);
+ do_no_context(regs, int_code, trans_exc_code);
}
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static noinline void do_sigbus(struct pt_regs *regs, long int_code,
+ unsigned long trans_exc_code)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
+ tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+ tsk->thread.trap_no = int_code;
force_sig(SIGBUS, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs, error_code, address);
}
#ifdef CONFIG_S390_EXEC_PROTECT
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
- unsigned long address, unsigned long error_code)
+static noinline int signal_return(struct pt_regs *regs, long int_code,
+ unsigned long trans_exc_code)
{
u16 instruction;
int rc;
-#ifdef CONFIG_COMPAT
- int compat;
-#endif
- pagefault_disable();
rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
- pagefault_enable();
- if (rc)
- return -EFAULT;
- up_read(&mm->mmap_sem);
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
-#ifdef CONFIG_COMPAT
- compat = is_compat_task();
- if (compat && instruction == 0x0a77)
- sys32_sigreturn();
- else if (compat && instruction == 0x0aad)
- sys32_rt_sigreturn();
- else
-#endif
- if (instruction == 0x0a77)
- sys_sigreturn();
- else if (instruction == 0x0aad)
- sys_rt_sigreturn();
- else {
- current->thread.prot_addr = address;
- current->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, SEGV_MAPERR, address);
- }
+ if (!rc && instruction == 0x0a77) {
+ clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+ if (is_compat_task())
+ sys32_sigreturn();
+ else
+ sys_sigreturn();
+ } else if (!rc && instruction == 0x0aad) {
+ clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+ if (is_compat_task())
+ sys32_rt_sigreturn();
+ else
+ sys_rt_sigreturn();
+ } else
+ do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
return 0;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
+static noinline void do_fault_error(struct pt_regs *regs, long int_code,
+ unsigned long trans_exc_code, int fault)
+{
+ int si_code;
+
+ switch (fault) {
+ case VM_FAULT_BADACCESS:
+#ifdef CONFIG_S390_EXEC_PROTECT
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+ (trans_exc_code & 3) == 0) {
+ signal_return(regs, int_code, trans_exc_code);
+ break;
+ }
+#endif /* CONFIG_S390_EXEC_PROTECT */
+ case VM_FAULT_BADMAP:
+ /* Bad memory access. Check if it is kernel or user space. */
+ if (regs->psw.mask & PSW_MASK_PSTATE) {
+ /* User mode accesses just cause a SIGSEGV */
+ si_code = (fault == VM_FAULT_BADMAP) ?
+ SEGV_MAPERR : SEGV_ACCERR;
+ do_sigsegv(regs, int_code, si_code, trans_exc_code);
+ return;
+ }
+ case VM_FAULT_BADCONTEXT:
+ do_no_context(regs, int_code, trans_exc_code);
+ break;
+ default: /* fault & VM_FAULT_ERROR */
+ if (fault & VM_FAULT_OOM)
+ pagefault_out_of_memory();
+ else if (fault & VM_FAULT_SIGBUS) {
+ do_sigbus(regs, int_code, trans_exc_code);
+ /* Kernel mode? Handle exceptions or die */
+ if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ do_no_context(regs, int_code, trans_exc_code);
+ } else
+ BUG();
+ break;
+ }
+}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
- * error_code:
+ * interruption code (int_code):
 * 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write)
+static inline int do_exception(struct pt_regs *regs, int access,
+ unsigned long trans_exc_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
- int space;
- int si_code;
int fault;
- if (notify_page_fault(regs, error_code))
- return;
+ if (notify_page_fault(regs))
+ return 0;
tsk = current;
mm = tsk->mm;
- /* get the failing address and the affected space */
- address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- space = check_space(tsk);
-
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
- if (unlikely(space == 0 || in_atomic() || !mm))
- goto no_context;
+ fault = VM_FAULT_BADCONTEXT;
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+ goto out;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
/*
* When we get here, the fault happened in the current
* task's user address space, so we can switch on the
@@ -309,42 +320,26 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
- si_code = SEGV_MAPERR;
+ fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
- goto bad_area;
-
-#ifdef CONFIG_S390_EXEC_PROTECT
- if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
- if (!signal_return(mm, regs, address, error_code))
- /*
- * signal_return() has done an up_read(&mm->mmap_sem)
- * if it returns 0.
- */
- return;
-#endif
+ goto out_up;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- si_code = SEGV_ACCERR;
- if (!write) {
- /* page not present, check vm flags */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- } else {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
+ if (unlikely(vma->vm_start > address)) {
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto out_up;
+ if (expand_stack(vma, address))
+ goto out_up;
}
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+ fault = VM_FAULT_BADACCESS;
+ if (unlikely(!(vma->vm_flags & access)))
+ goto out_up;
+
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
@@ -352,18 +347,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM) {
- up_read(&mm->mmap_sem);
- pagefault_out_of_memory();
- return;
- } else if (fault & VM_FAULT_SIGBUS) {
- do_sigbus(regs, error_code, address);
- return;
- }
- BUG();
- }
+ fault = handle_mm_fault(mm, vma, address,
+ (access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ goto out_up;
+
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
@@ -373,74 +361,69 @@ good_area:
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
- up_read(&mm->mmap_sem);
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
+ fault = 0;
+out_up:
up_read(&mm->mmap_sem);
-
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, si_code, address);
- return;
- }
-
-no_context:
- do_no_context(regs, error_code, address);
+out:
+ return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs,
- long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
{
+ unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+ int fault;
+
 /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr -= (error_code >> 16);
+ regs->psw.addr -= (int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
- if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
- do_low_address(regs, error_code);
+ if (unlikely(!(trans_exc_code & 4))) {
+ do_low_address(regs, int_code, trans_exc_code);
return;
}
- do_exception(regs, 4, 1);
+ fault = do_exception(regs, VM_WRITE, trans_exc_code);
+ if (unlikely(fault))
+ do_fault_error(regs, 4, trans_exc_code, fault);
}
-void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
{
- do_exception(regs, error_code & 0xff, 0);
+ unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+ int access, fault;
+
+ access = VM_READ | VM_EXEC | VM_WRITE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+ if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+ (trans_exc_code & 3) == 0)
+ access = VM_EXEC;
+#endif
+ fault = do_exception(regs, access, trans_exc_code);
+ if (unlikely(fault))
+ do_fault_error(regs, int_code & 255, trans_exc_code, fault);
}
#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
{
- struct mm_struct *mm;
+ unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long address;
- int space;
-
- mm = current->mm;
- address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- space = check_space(current);
- if (unlikely(space == 0 || in_atomic() || !mm))
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
local_irq_enable();
down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
+ vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
up_read(&mm->mmap_sem);
if (vma) {
@@ -450,17 +433,38 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
- current->thread.prot_addr = address;
- current->thread.trap_no = error_code;
- do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+ do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
return;
}
no_context:
- do_no_context(regs, error_code, address);
+ do_no_context(regs, int_code, trans_exc_code);
}
#endif
+int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+{
+ struct pt_regs regs;
+ int access, fault;
+
+ regs.psw.mask = psw_kernel_bits;
+ if (!irqs_disabled())
+ regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+ regs.psw.addr = (unsigned long) __builtin_return_address(0);
+ regs.psw.addr |= PSW_ADDR_AMODE;
+ uaddr &= PAGE_MASK;
+ access = write_user ? VM_WRITE : VM_READ;
+ fault = do_exception(&regs, access, uaddr | 2);
+ if (unlikely(fault)) {
+ if (fault & VM_FAULT_OOM) {
+ pagefault_out_of_memory();
+ fault = 0;
+ } else if (fault & VM_FAULT_SIGBUS)
+ do_sigbus(&regs, int_code, uaddr);
+ }
+ return fault ? -EFAULT : 0;
+}
+
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
@@ -522,7 +526,7 @@ void pfault_fini(void)
: : "a" (&refbk), "m" (refbk) : "cc");
}
-static void pfault_interrupt(__u16 error_code)
+static void pfault_interrupt(__u16 int_code)
{
struct task_struct *tsk;
__u16 subcode;
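For reference, the decision user_space_fault() now encodes, keyed on the low two bits of the translation-exception identification (a summary of the code above, not new behaviour):

	/*
	 * trans_exc_code & 3 == 2 (secondary space):  user/kernel decided by set_fs()
	 * trans_exc_code & 3 == 3 (home space):       user space only if user_mode == HOME_SPACE_MODE
	 * trans_exc_code & 3 == 0/1 (primary / AR):   user space only if user_mode != HOME_SPACE_MODE
	 */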
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2757c5616a0..ad621e06ada 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -269,7 +269,7 @@ int s390_enable_sie(void)
struct mm_struct *mm, *old_mm;
/* Do we have switched amode? If no, we cannot do sie */
- if (!switch_amode)
+ if (user_mode == HOME_SPACE_MODE)
return -EINVAL;
/* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 5f91a38d759..300ab012b0f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,8 +70,12 @@ static pte_t __ref *vmem_pte_alloc(void)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
- PTRS_PER_PTE * sizeof(pte_t));
+ if (MACHINE_HAS_HPAGE)
+ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
+ PTRS_PER_PTE * sizeof(pte_t));
+ else
+ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -112,7 +116,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
+ _SEGMENT_ENTRY_CO;
pmd_val(*pm_dir) = pte_val(pte);
address += HPAGE_SIZE - PAGE_SIZE;
continue;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 88cdeb9f72d..0031a6979f3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,9 @@ config SUPERH
select HAVE_IOREMAP_PROT if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_ATTRS
select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
@@ -37,6 +39,7 @@ config SUPERH32
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_ARCH_KGDB
select ARCH_HIBERNATION_POSSIBLE if MMU
@@ -170,6 +173,12 @@ config ARCH_HAS_CPU_IDLE_WAIT
config IO_TRAPPED
bool
+config DMA_COHERENT
+ bool
+
+config DMA_NONCOHERENT
+ def_bool !DMA_COHERENT
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -220,6 +229,7 @@ config CPU_SHX2
config CPU_SHX3
bool
+ select DMA_COHERENT
config ARCH_SHMOBILE
bool
@@ -761,17 +771,6 @@ config ENTRY_OFFSET
default "0x00010000" if PAGE_SIZE_64KB
default "0x00000000"
-config UBC_WAKEUP
- bool "Wakeup UBC on startup"
- depends on CPU_SH4 && !CPU_SH4A
- help
- Selecting this option will wakeup the User Break Controller (UBC) on
- startup. Although the UBC is left in an awake state when the processor
- comes up, some boot loaders misbehave by putting the UBC to sleep in a
- power saving state, which causes issues with things like ptrace().
-
- If unsure, say N.
-
choice
prompt "Kernel command line"
optional
@@ -818,7 +817,13 @@ config MAPLE
Dreamcast with a serial line terminal or a remote network
connection.
-source "arch/sh/drivers/pci/Kconfig"
+config PCI
+ bool "PCI support"
+ depends on SYS_SUPPORTS_PCI
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. If you have PCI, say Y, otherwise N.
source "drivers/pci/pcie/Kconfig"
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 66e40aabc60..ac17c5ac550 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -78,6 +78,9 @@ defaultimage-$(CONFIG_SUPERH32) := zImage
defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
defaultimage-$(CONFIG_SH_RSK) := uImage
defaultimage-$(CONFIG_SH_URQUELL) := uImage
+defaultimage-$(CONFIG_SH_MIGOR) := uImage
+defaultimage-$(CONFIG_SH_AP325RXA) := uImage
+defaultimage-$(CONFIG_SH_7724_SOLUTION_ENGINE) := uImage
defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
@@ -136,6 +139,7 @@ machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh
machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705
machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
machdir-$(CONFIG_SH_MIGOR) += mach-migor
+machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
machdir-$(CONFIG_SH_KFR2R09) += mach-kfr2r09
machdir-$(CONFIG_SH_ECOVEC) += mach-ecovec24
machdir-$(CONFIG_SH_SDK7780) += mach-sdk7780
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 7baa2109023..ce0f2638178 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -1,7 +1,6 @@
#
# Specific board support, not covered by a mach group.
#
-obj-$(CONFIG_SH_AP325RXA) += board-ap325rxa.o
obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o
obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o
obj-$(CONFIG_SH_URQUELL) += board-urquell.o
diff --git a/arch/sh/boards/mach-ap325rxa/Makefile b/arch/sh/boards/mach-ap325rxa/Makefile
new file mode 100644
index 00000000000..4cf1774d261
--- /dev/null
+++ b/arch/sh/boards/mach-ap325rxa/Makefile
@@ -0,0 +1,2 @@
+obj-y := setup.o sdram.o
+
diff --git a/arch/sh/boards/mach-ap325rxa/sdram.S b/arch/sh/boards/mach-ap325rxa/sdram.S
new file mode 100644
index 00000000000..db24fbed4fc
--- /dev/null
+++ b/arch/sh/boards/mach-ap325rxa/sdram.S
@@ -0,0 +1,69 @@
+/*
+ * AP325RXA sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(ap325rxa_sdram_enter_start)
+
+ /* SBSC: disable power down and put in self-refresh mode */
+ mov.l 1f, r4
+ mov.l 2f, r1
+ mov.l @r4, r2
+ or r1, r2
+ mov.l 3f, r3
+ and r3, r2
+ mov.l r2, @r4
+
+ rts
+ nop
+
+ .balign 4
+1: .long 0xfe400008 /* SDCR0 */
+2: .long 0x00000400
+3: .long 0xffff7fff
+ENTRY(ap325rxa_sdram_enter_end)
+
+ .balign 4
+ENTRY(ap325rxa_sdram_leave_start)
+
+ /* SBSC: set auto-refresh mode */
+ mov.l 1f, r4
+ mov.l @r4, r0
+ mov.l 4f, r1
+ and r1, r0
+ mov.l r0, @r4
+ mov.l 6f, r4
+ mov.l 8f, r0
+ mov.l @r4, r1
+ mov #-1, r4
+ add r4, r1
+ or r1, r0
+ mov.l 7f, r1
+ mov.l r0, @r1
+
+ rts
+ nop
+
+ .balign 4
+1: .long 0xfe400008 /* SDCR0 */
+4: .long 0xfffffbff
+6: .long 0xfe40001c /* RTCOR */
+7: .long 0xfe400018 /* RTCNT */
+8: .long 0xa55a0000
+ENTRY(ap325rxa_sdram_leave_end)
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 2d080732a96..cf9dc12dfeb 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -20,8 +20,6 @@
#include <linux/i2c.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_gpio.h>
#include <media/ov772x.h>
#include <media/soc_camera.h>
#include <media/soc_camera_platform.h>
@@ -29,6 +27,7 @@
#include <video/sh_mobile_lcdc.h>
#include <asm/io.h>
#include <asm/clock.h>
+#include <asm/suspend.h>
#include <cpu/sh7723.h>
static struct smsc911x_platform_config smsc911x_config = {
@@ -409,17 +408,49 @@ static struct platform_device ceu_device = {
},
};
-struct spi_gpio_platform_data sdcard_cn3_platform_data = {
- .sck = GPIO_PTD0,
- .mosi = GPIO_PTD1,
- .miso = GPIO_PTD2,
- .num_chipselect = 1,
+static struct resource sdhi0_cn3_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0x04ce0000,
+ .end = 0x04ce01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 101,
+ .flags = IORESOURCE_IRQ,
+ },
};
-static struct platform_device sdcard_cn3_device = {
- .name = "spi_gpio",
- .dev = {
- .platform_data = &sdcard_cn3_platform_data,
+static struct platform_device sdhi0_cn3_device = {
+ .name = "sh_mobile_sdhi",
+ .id = 0, /* "sdhi0" clock */
+ .num_resources = ARRAY_SIZE(sdhi0_cn3_resources),
+ .resource = sdhi0_cn3_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI0,
+ },
+};
+
+static struct resource sdhi1_cn7_resources[] = {
+ [0] = {
+ .name = "SDHI1",
+ .start = 0x04cf0000,
+ .end = 0x04cf01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 24,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi1_cn7_device = {
+ .name = "sh_mobile_sdhi",
+ .id = 1, /* "sdhi1" clock */
+ .num_resources = ARRAY_SIZE(sdhi1_cn7_resources),
+ .resource = sdhi1_cn7_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI1,
},
};
@@ -470,22 +501,26 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
&lcdc_device,
&ceu_device,
&nand_flash_device,
- &sdcard_cn3_device,
+ &sdhi0_cn3_device,
+ &sdhi1_cn7_device,
&ap325rxa_camera[0],
&ap325rxa_camera[1],
};
-static struct spi_board_info ap325rxa_spi_devices[] = {
- {
- .modalias = "mmc_spi",
- .max_speed_hz = 5000000,
- .chip_select = 0,
- .controller_data = (void *) GPIO_PTD5,
- },
-};
+extern char ap325rxa_sdram_enter_start;
+extern char ap325rxa_sdram_enter_end;
+extern char ap325rxa_sdram_leave_start;
+extern char ap325rxa_sdram_leave_end;
static int __init ap325rxa_devices_setup(void)
{
+ /* register board specific self-refresh code */
+ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+ &ap325rxa_sdram_enter_start,
+ &ap325rxa_sdram_enter_end,
+ &ap325rxa_sdram_leave_start,
+ &ap325rxa_sdram_leave_end);
+
/* LD3 and LD4 LEDs */
gpio_request(GPIO_PTX5, NULL); /* RUN */
gpio_direction_output(GPIO_PTX5, 1);
@@ -578,12 +613,28 @@ static int __init ap325rxa_devices_setup(void)
platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
+ /* SDHI0 - CN3 - SD CARD */
+ gpio_request(GPIO_FN_SDHI0CD_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0WP_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0D3_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0D2_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0D1_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0D0_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0CMD_PTD, NULL);
+ gpio_request(GPIO_FN_SDHI0CLK_PTD, NULL);
+
+ /* SDHI1 - CN7 - MICRO SD CARD */
+ gpio_request(GPIO_FN_SDHI1CD, NULL);
+ gpio_request(GPIO_FN_SDHI1D3, NULL);
+ gpio_request(GPIO_FN_SDHI1D2, NULL);
+ gpio_request(GPIO_FN_SDHI1D1, NULL);
+ gpio_request(GPIO_FN_SDHI1D0, NULL);
+ gpio_request(GPIO_FN_SDHI1CMD, NULL);
+ gpio_request(GPIO_FN_SDHI1CLK, NULL);
+
i2c_register_board_info(0, ap325rxa_i2c_devices,
ARRAY_SIZE(ap325rxa_i2c_devices));
- spi_register_board_info(ap325rxa_spi_devices,
- ARRAY_SIZE(ap325rxa_spi_devices));
-
return platform_add_devices(ap325rxa_devices,
ARRAY_SIZE(ap325rxa_devices));
}
diff --git a/arch/sh/boards/mach-ecovec24/Makefile b/arch/sh/boards/mach-ecovec24/Makefile
index 51f85215165..e69bc82208f 100644
--- a/arch/sh/boards/mach-ecovec24/Makefile
+++ b/arch/sh/boards/mach-ecovec24/Makefile
@@ -6,4 +6,4 @@
# for more details.
#
-obj-y := setup.o
\ No newline at end of file
+obj-y := setup.o sdram.o
\ No newline at end of file
diff --git a/arch/sh/boards/mach-ecovec24/sdram.S b/arch/sh/boards/mach-ecovec24/sdram.S
new file mode 100644
index 00000000000..83344004440
--- /dev/null
+++ b/arch/sh/boards/mach-ecovec24/sdram.S
@@ -0,0 +1,52 @@
+/*
+ * Ecovec24 sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(ecovec24_sdram_enter_start)
+
+ /* DBSC: put memory in self-refresh mode */
+
+ ED 0xFD000010, 0x00000000 /* DBEN */
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(ecovec24_sdram_enter_end)
+
+ .balign 4
+ENTRY(ecovec24_sdram_leave_start)
+
+ /* DBSC: put memory in auto-refresh mode */
+
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ WAIT 1
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000010, 0x00000001 /* DBEN */
+ ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(ecovec24_sdram_leave_end)
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3b1ceb46fa5..826e62326d5 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -20,12 +20,14 @@
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <video/sh_mobile_lcdc.h>
#include <media/sh_mobile_ceu.h>
#include <asm/heartbeat.h>
#include <asm/sh_eth.h>
-#include <asm/sh_keysc.h>
#include <asm/clock.h>
+#include <asm/suspend.h>
#include <cpu/sh7724.h>
/*
@@ -147,6 +149,9 @@ static struct platform_device sh_eth_device = {
},
.num_resources = ARRAY_SIZE(sh_eth_resources),
.resource = sh_eth_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_ETHER,
+ },
};
/* USB0 host */
@@ -185,30 +190,18 @@ static struct platform_device usb0_host_device = {
.resource = usb0_host_resources,
};
-/*
- * USB1
- *
- * CN5 can use both host/function,
- * and we can determine it by checking PTB[3]
- *
- * This time only USB1 host is supported.
- */
+/* USB1 host/function */
void usb1_port_power(int port, int power)
{
- if (!gpio_get_value(GPIO_PTB3)) {
- printk(KERN_ERR "USB1 function is not supported\n");
- return;
- }
-
gpio_set_value(GPIO_PTB5, power);
}
-static struct r8a66597_platdata usb1_host_data = {
+static struct r8a66597_platdata usb1_common_data = {
.on_chip = 1,
.port_power = usb1_port_power,
};
-static struct resource usb1_host_resources[] = {
+static struct resource usb1_common_resources[] = {
[0] = {
.start = 0xa4d90000,
.end = 0xa4d90124 - 1,
@@ -221,16 +214,16 @@ static struct resource usb1_host_resources[] = {
},
};
-static struct platform_device usb1_host_device = {
- .name = "r8a66597_hcd",
+static struct platform_device usb1_common_device = {
+ /* .name will be added in arch_setup */
.id = 1,
.dev = {
.dma_mask = NULL, /* not use dma */
.coherent_dma_mask = 0xffffffff,
- .platform_data = &usb1_host_data,
+ .platform_data = &usb1_common_data,
},
- .num_resources = ARRAY_SIZE(usb1_host_resources),
- .resource = usb1_host_resources,
+ .num_resources = ARRAY_SIZE(usb1_common_resources),
+ .resource = usb1_common_resources,
};
/* LCDC */
@@ -428,16 +421,90 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0,
};
+/* SDHI0 */
+static void sdhi0_set_pwr(struct platform_device *pdev, int state)
+{
+ gpio_set_value(GPIO_PTB6, state);
+}
+
+static struct sh_mobile_sdhi_info sdhi0_info = {
+ .set_pwr = sdhi0_set_pwr,
+};
+
+static struct resource sdhi0_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0x04ce0000,
+ .end = 0x04ce01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 101,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi0_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi0_resources),
+ .resource = sdhi0_resources,
+ .id = 0,
+ .dev = {
+ .platform_data = &sdhi0_info,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI0,
+ },
+};
+
+/* SDHI1 */
+static void sdhi1_set_pwr(struct platform_device *pdev, int state)
+{
+ gpio_set_value(GPIO_PTB7, state);
+}
+
+static struct sh_mobile_sdhi_info sdhi1_info = {
+ .set_pwr = sdhi1_set_pwr,
+};
+
+static struct resource sdhi1_resources[] = {
+ [0] = {
+ .name = "SDHI1",
+ .start = 0x04cf0000,
+ .end = 0x04cf01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 24,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi1_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi1_resources),
+ .resource = sdhi1_resources,
+ .id = 1,
+ .dev = {
+ .platform_data = &sdhi1_info,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI1,
+ },
+};
+
static struct platform_device *ecovec_devices[] __initdata = {
&heartbeat_device,
&nor_flash_device,
&sh_eth_device,
&usb0_host_device,
- &usb1_host_device, /* USB1 host support */
+ &usb1_common_device,
&lcdc_device,
&ceu0_device,
&ceu1_device,
&keysc_device,
+ &sdhi0_device,
+ &sdhi1_device,
};
#define EEPROM_ADDR 0x50
@@ -466,12 +533,9 @@ static u8 mac_read(struct i2c_adapter *a, u8 command)
return buf;
}
-#define MAC_LEN 6
-static void __init sh_eth_init(void)
+static void __init sh_eth_init(struct sh_eth_plat_data *pd)
{
struct i2c_adapter *a = i2c_get_adapter(1);
- struct clk *eth_clk;
- u8 mac[MAC_LEN];
int i;
if (!a) {
@@ -479,39 +543,30 @@ static void __init sh_eth_init(void)
return;
}
- eth_clk = clk_get(NULL, "eth0");
- if (!eth_clk) {
- pr_err("can not get eth0 clk\n");
- return;
- }
-
 /* read MAC address from EEPROM */
- for (i = 0; i < MAC_LEN; i++) {
- mac[i] = mac_read(a, 0x10 + i);
+ for (i = 0; i < sizeof(pd->mac_addr); i++) {
+ pd->mac_addr[i] = mac_read(a, 0x10 + i);
msleep(10);
}
-
- /* clock enable */
- clk_enable(eth_clk);
-
- /* reset sh-eth */
- ctrl_outl(0x1, SH_ETH_ADDR + 0x0);
-
- /* set MAC addr */
- ctrl_outl((mac[0] << 24) |
- (mac[1] << 16) |
- (mac[2] << 8) |
- (mac[3] << 0), SH_ETH_MAHR);
- ctrl_outl((mac[4] << 8) |
- (mac[5] << 0), SH_ETH_MALR);
-
- clk_put(eth_clk);
}
#define PORT_HIZA 0xA4050158
#define IODRIVEA 0xA405018A
+
+extern char ecovec24_sdram_enter_start;
+extern char ecovec24_sdram_enter_end;
+extern char ecovec24_sdram_leave_start;
+extern char ecovec24_sdram_leave_end;
+
static int __init arch_setup(void)
{
+ /* register board specific self-refresh code */
+ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+ &ecovec24_sdram_enter_start,
+ &ecovec24_sdram_enter_end,
+ &ecovec24_sdram_leave_start,
+ &ecovec24_sdram_leave_end);
+
/* enable STATUS0, STATUS2 and PDSTATUS */
gpio_request(GPIO_FN_STATUS0, NULL);
gpio_request(GPIO_FN_STATUS2, NULL);
@@ -561,6 +616,14 @@ static int __init arch_setup(void)
ctrl_outw(0x0600, 0xa40501d4);
ctrl_outw(0x0600, 0xa4050192);
+ if (gpio_get_value(GPIO_PTB3)) {
+ printk(KERN_INFO "USB1 function is selected\n");
+ usb1_common_device.name = "r8a66597_udc";
+ } else {
+ printk(KERN_INFO "USB1 host is selected\n");
+ usb1_common_device.name = "r8a66597_hcd";
+ }
+
/* enable LCDC */
gpio_request(GPIO_FN_LCDD23, NULL);
gpio_request(GPIO_FN_LCDD22, NULL);
@@ -603,8 +666,8 @@ static int __init arch_setup(void)
gpio_direction_output(GPIO_PTR1, 0);
gpio_direction_output(GPIO_PTA2, 0);
- /* I/O buffer drive ability is low */
- ctrl_outw((ctrl_inw(IODRIVEA) & ~0x00c0) | 0x0040 , IODRIVEA);
+ /* I/O buffer drive ability is high */
+ ctrl_outw((ctrl_inw(IODRIVEA) & ~0x00c0) | 0x0080 , IODRIVEA);
if (gpio_get_value(GPIO_PTE6)) {
/* DVI */
@@ -710,6 +773,33 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
+ /* enable SDHI0 (needs DS2.4 set to ON) */
+ gpio_request(GPIO_FN_SDHI0CD, NULL);
+ gpio_request(GPIO_FN_SDHI0WP, NULL);
+ gpio_request(GPIO_FN_SDHI0CMD, NULL);
+ gpio_request(GPIO_FN_SDHI0CLK, NULL);
+ gpio_request(GPIO_FN_SDHI0D3, NULL);
+ gpio_request(GPIO_FN_SDHI0D2, NULL);
+ gpio_request(GPIO_FN_SDHI0D1, NULL);
+ gpio_request(GPIO_FN_SDHI0D0, NULL);
+ gpio_request(GPIO_PTB6, NULL);
+ gpio_direction_output(GPIO_PTB6, 0);
+
+ /* enable SDHI1 (needs DS2.6,7 set to ON,OFF) */
+ gpio_request(GPIO_FN_SDHI1CD, NULL);
+ gpio_request(GPIO_FN_SDHI1WP, NULL);
+ gpio_request(GPIO_FN_SDHI1CMD, NULL);
+ gpio_request(GPIO_FN_SDHI1CLK, NULL);
+ gpio_request(GPIO_FN_SDHI1D3, NULL);
+ gpio_request(GPIO_FN_SDHI1D2, NULL);
+ gpio_request(GPIO_FN_SDHI1D1, NULL);
+ gpio_request(GPIO_FN_SDHI1D0, NULL);
+ gpio_request(GPIO_PTB7, NULL);
+ gpio_direction_output(GPIO_PTB7, 0);
+
+ /* I/O buffer drive ability is high for SDHI1 */
+ ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
+
/* enable I2C device */
i2c_register_board_info(1, i2c1_devices,
ARRAY_SIZE(i2c1_devices));
@@ -721,12 +811,11 @@ arch_initcall(arch_setup);
static int __init devices_setup(void)
{
- sh_eth_init();
+ sh_eth_init(&sh_eth_plat);
return 0;
}
device_initcall(devices_setup);
-
static struct sh_machine_vector mv_ecovec __initmv = {
.mv_name = "R0P7724 (EcoVec)",
};
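The sh_eth hunk above drops the board-side register writes: sh_eth_init() now only fills pd->mac_addr from the EEPROM and leaves programming of the address registers to the sh_eth driver. A minimal stand-alone C sketch of the packing the removed code performed (illustration only, with an arbitrary example address; not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* example MAC address, purely for illustration */
		uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* MAHR takes the first four bytes, MALR the last two,
		 * exactly as the removed ctrl_outl() calls packed them */
		uint32_t mahr = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
				((uint32_t)mac[2] << 8) | mac[3];
		uint32_t malr = ((uint32_t)mac[4] << 8) | mac[5];

		printf("MAHR = 0x%08x, MALR = 0x%08x\n", mahr, malr);
		return 0;
	}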
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index 566e69d8d72..f663c14d888 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -384,7 +384,7 @@ static unsigned char irl2irq[HL_NR_IRL];
static int highlander_irq_demux(int irq)
{
- if (irq >= HL_NR_IRL || !irl2irq[irq])
+ if (irq >= HL_NR_IRL || irq < 0 || !irl2irq[irq])
return irq;
return irl2irq[irq];
diff --git a/arch/sh/boards/mach-kfr2r09/Makefile b/arch/sh/boards/mach-kfr2r09/Makefile
index 5d5867826e3..4e577a3bf65 100644
--- a/arch/sh/boards/mach-kfr2r09/Makefile
+++ b/arch/sh/boards/mach-kfr2r09/Makefile
@@ -1,2 +1,2 @@
-obj-y := setup.o
+obj-y := setup.o sdram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += lcd_wqvga.o
diff --git a/arch/sh/boards/mach-kfr2r09/sdram.S b/arch/sh/boards/mach-kfr2r09/sdram.S
new file mode 100644
index 00000000000..0c9f55bec2f
--- /dev/null
+++ b/arch/sh/boards/mach-kfr2r09/sdram.S
@@ -0,0 +1,80 @@
+/*
+ * KFR2R09 sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(kfr2r09_sdram_enter_start)
+
+ /* DBSC: put memory in self-refresh mode */
+
+ ED 0xFD000010, 0x00000000 /* DBEN */
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(kfr2r09_sdram_enter_end)
+
+ .balign 4
+ENTRY(kfr2r09_sdram_leave_start)
+
+ /* DBSC: put memory in auto-refresh mode */
+
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_RSTANDBY, r0
+ bf resume_rstandby
+
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ WAIT 1
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000010, 0x00000001 /* DBEN */
+ ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+ rts
+ nop
+
+resume_rstandby:
+
+ /* DBSC: re-initialize and put in auto-refresh */
+
+ ED 0xFD000108, 0x40000301 /* DBPDCNT0 */
+ ED 0xFD000020, 0x011B0002 /* DBCONF */
+ ED 0xFD000030, 0x03060E02 /* DBTR0 */
+ ED 0xFD000034, 0x01020102 /* DBTR1 */
+ ED 0xFD000038, 0x01090406 /* DBTR2 */
+ ED 0xFD000008, 0x00000004 /* DBKIND */
+ ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ ED 0xFD000018, 0x00000001 /* DBCKECNT */
+ WAIT 1
+ ED 0xFD000010, 0x00000001 /* DBEN */
+ ED 0xFD000044, 0x000004AF /* DBRFPDN1 */
+ ED 0xFD000048, 0x20CF0037 /* DBRFPDN2 */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000108, 0x40000300 /* DBPDCNT0 */
+ ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(kfr2r09_sdram_leave_end)
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index c08d33fe210..87438d6603d 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -16,13 +16,16 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/i2c.h>
#include <linux/usb/r8a66597.h>
+#include <media/soc_camera.h>
+#include <media/sh_mobile_ceu.h>
#include <video/sh_mobile_lcdc.h>
+#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/machvec.h>
#include <asm/io.h>
-#include <asm/sh_keysc.h>
#include <cpu/sh7724.h>
#include <mach/kfr2r09.h>
@@ -212,11 +215,154 @@ static struct platform_device kfr2r09_usb0_gadget_device = {
.resource = kfr2r09_usb0_gadget_resources,
};
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+ .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+};
+
+static struct resource kfr2r09_ceu_resources[] = {
+ [0] = {
+ .name = "CEU",
+ .start = 0xfe910000,
+ .end = 0xfe91009f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 52,
+ .end = 52,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device kfr2r09_ceu_device = {
+ .name = "sh_mobile_ceu",
+ .id = 0, /* "ceu0" clock */
+ .num_resources = ARRAY_SIZE(kfr2r09_ceu_resources),
+ .resource = kfr2r09_ceu_resources,
+ .dev = {
+ .platform_data = &sh_mobile_ceu_info,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_CEU0,
+ },
+};
+
+static struct i2c_board_info kfr2r09_i2c_camera = {
+ I2C_BOARD_INFO("rj54n1cb0c", 0x50),
+};
+
+static struct clk *camera_clk;
+
+#define DRVCRB 0xA405018C
+static int camera_power(struct device *dev, int mode)
+{
+ int ret;
+
+ if (mode) {
+ long rate;
+
+ camera_clk = clk_get(NULL, "video_clk");
+ if (IS_ERR(camera_clk))
+ return PTR_ERR(camera_clk);
+
+ /* set VIO_CKO clock to 25MHz */
+ rate = clk_round_rate(camera_clk, 25000000);
+ ret = clk_set_rate(camera_clk, rate);
+ if (ret < 0)
+ goto eclkrate;
+
+ /* set DRVCRB
+ *
+ * use 1.8 V for VccQ_VIO
+ * use 2.85V for VccQ_SR
+ */
+ ctrl_outw((ctrl_inw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+
+ /* reset clear */
+ ret = gpio_request(GPIO_PTB4, NULL);
+ if (ret < 0)
+ goto eptb4;
+ ret = gpio_request(GPIO_PTB7, NULL);
+ if (ret < 0)
+ goto eptb7;
+
+ ret = gpio_direction_output(GPIO_PTB4, 1);
+ if (!ret)
+ ret = gpio_direction_output(GPIO_PTB7, 1);
+ if (ret < 0)
+ goto egpioout;
+ msleep(1);
+
+ ret = clk_enable(camera_clk); /* start VIO_CKO */
+ if (ret < 0)
+ goto eclkon;
+
+ return 0;
+ }
+
+ ret = 0;
+
+ clk_disable(camera_clk);
+eclkon:
+ gpio_set_value(GPIO_PTB7, 0);
+egpioout:
+ gpio_set_value(GPIO_PTB4, 0);
+ gpio_free(GPIO_PTB7);
+eptb7:
+ gpio_free(GPIO_PTB4);
+eptb4:
+eclkrate:
+ clk_put(camera_clk);
+ return ret;
+}
+
+static struct soc_camera_link rj54n1_link = {
+ .power = camera_power,
+ .board_info = &kfr2r09_i2c_camera,
+ .i2c_adapter_id = 1,
+ .module_name = "rj54n1cb0c",
+};
+
+static struct platform_device kfr2r09_camera = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &rj54n1_link,
+ },
+};
+
+static struct resource kfr2r09_sh_sdhi0_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0x04ce0000,
+ .end = 0x04ce01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 101,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device kfr2r09_sh_sdhi0_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(kfr2r09_sh_sdhi0_resources),
+ .resource = kfr2r09_sh_sdhi0_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI0,
+ },
+};
+
static struct platform_device *kfr2r09_devices[] __initdata = {
&kfr2r09_nor_flash_device,
&kfr2r09_nand_flash_device,
&kfr2r09_sh_keysc_device,
&kfr2r09_sh_lcdc_device,
+ &kfr2r09_ceu_device,
+ &kfr2r09_camera,
+ &kfr2r09_sh_sdhi0_device,
};
#define BSC_CS0BCR 0xfec10004
@@ -268,11 +414,59 @@ static int kfr2r09_usb0_gadget_i2c_setup(void)
return 0;
}
+
+static int kfr2r09_serial_i2c_setup(void)
+{
+ struct i2c_adapter *a;
+ struct i2c_msg msg;
+ unsigned char buf[2];
+ int ret;
+
+ a = i2c_get_adapter(0);
+ if (!a)
+ return -ENODEV;
+
+ /* set bit 6 (the 7th bit) of chip at 0x09, register 0x13 */
+ buf[0] = 0x13;
+ msg.addr = 0x09;
+ msg.buf = buf;
+ msg.len = 1;
+ msg.flags = 0;
+ ret = i2c_transfer(a, &msg, 1);
+ if (ret != 1)
+ return -ENODEV;
+
+ buf[0] = 0;
+ msg.addr = 0x09;
+ msg.buf = buf;
+ msg.len = 1;
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(a, &msg, 1);
+ if (ret != 1)
+ return -ENODEV;
+
+ buf[1] = buf[0] | (1 << 6);
+ buf[0] = 0x13;
+ msg.addr = 0x09;
+ msg.buf = buf;
+ msg.len = 2;
+ msg.flags = 0;
+ ret = i2c_transfer(a, &msg, 1);
+ if (ret != 1)
+ return -ENODEV;
+
+ return 0;
+}
#else
static int kfr2r09_usb0_gadget_i2c_setup(void)
{
return -ENODEV;
}
+
+static int kfr2r09_serial_i2c_setup(void)
+{
+ return -ENODEV;
+}
#endif
static int kfr2r09_usb0_gadget_setup(void)
@@ -299,11 +493,27 @@ static int kfr2r09_usb0_gadget_setup(void)
return 0;
}
+extern char kfr2r09_sdram_enter_start;
+extern char kfr2r09_sdram_enter_end;
+extern char kfr2r09_sdram_leave_start;
+extern char kfr2r09_sdram_leave_end;
+
static int __init kfr2r09_devices_setup(void)
{
+ /* register board specific self-refresh code */
+ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
+ SUSP_SH_RSTANDBY,
+ &kfr2r09_sdram_enter_start,
+ &kfr2r09_sdram_enter_end,
+ &kfr2r09_sdram_leave_start,
+ &kfr2r09_sdram_leave_end);
+
/* enable SCIF1 serial port for YC401 console support */
gpio_request(GPIO_FN_SCIF1_RXD, NULL);
gpio_request(GPIO_FN_SCIF1_TXD, NULL);
+ kfr2r09_serial_i2c_setup(); /* ECONTMSK(bit6=L10ONEN) set 1 */
+ gpio_request(GPIO_PTG3, NULL); /* HPON_ON */
+ gpio_direction_output(GPIO_PTG3, 1); /* HPON_ON = H */
/* setup NOR flash at CS0 */
ctrl_outl(0x36db0400, BSC_CS0BCR);
@@ -361,6 +571,32 @@ static int __init kfr2r09_devices_setup(void)
if (kfr2r09_usb0_gadget_setup() == 0)
platform_device_register(&kfr2r09_usb0_gadget_device);
+ /* CEU */
+ gpio_request(GPIO_FN_VIO_CKO, NULL);
+ gpio_request(GPIO_FN_VIO0_CLK, NULL);
+ gpio_request(GPIO_FN_VIO0_VD, NULL);
+ gpio_request(GPIO_FN_VIO0_HD, NULL);
+ gpio_request(GPIO_FN_VIO0_FLD, NULL);
+ gpio_request(GPIO_FN_VIO0_D7, NULL);
+ gpio_request(GPIO_FN_VIO0_D6, NULL);
+ gpio_request(GPIO_FN_VIO0_D5, NULL);
+ gpio_request(GPIO_FN_VIO0_D4, NULL);
+ gpio_request(GPIO_FN_VIO0_D3, NULL);
+ gpio_request(GPIO_FN_VIO0_D2, NULL);
+ gpio_request(GPIO_FN_VIO0_D1, NULL);
+ gpio_request(GPIO_FN_VIO0_D0, NULL);
+
+ platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);
+
+ /* SDHI0 connected to yc304 */
+ gpio_request(GPIO_FN_SDHI0CD, NULL);
+ gpio_request(GPIO_FN_SDHI0D3, NULL);
+ gpio_request(GPIO_FN_SDHI0D2, NULL);
+ gpio_request(GPIO_FN_SDHI0D1, NULL);
+ gpio_request(GPIO_FN_SDHI0D0, NULL);
+ gpio_request(GPIO_FN_SDHI0CMD, NULL);
+ gpio_request(GPIO_FN_SDHI0CLK, NULL);
+
return platform_add_devices(kfr2r09_devices,
ARRAY_SIZE(kfr2r09_devices));
}
diff --git a/arch/sh/boards/mach-migor/Makefile b/arch/sh/boards/mach-migor/Makefile
index 5f231dd25c0..4601a89e5ac 100644
--- a/arch/sh/boards/mach-migor/Makefile
+++ b/arch/sh/boards/mach-migor/Makefile
@@ -1,2 +1,2 @@
-obj-y := setup.o
+obj-y := setup.o sdram.o
obj-$(CONFIG_SH_MIGOR_QVGA) += lcd_qvga.o
diff --git a/arch/sh/boards/mach-migor/sdram.S b/arch/sh/boards/mach-migor/sdram.S
new file mode 100644
index 00000000000..614aa3a1398
--- /dev/null
+++ b/arch/sh/boards/mach-migor/sdram.S
@@ -0,0 +1,69 @@
+/*
+ * Migo-R sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(migor_sdram_enter_start)
+
+ /* SBSC: disable power down and put in self-refresh mode */
+ mov.l 1f, r4
+ mov.l 2f, r1
+ mov.l @r4, r2
+ or r1, r2
+ mov.l 3f, r3
+ and r3, r2
+ mov.l r2, @r4
+
+ rts
+ nop
+
+ .balign 4
+1: .long 0xfe400008 /* SDCR0 */
+2: .long 0x00000400
+3: .long 0xffff7fff
+ENTRY(migor_sdram_enter_end)
+
+ .balign 4
+ENTRY(migor_sdram_leave_start)
+
+ /* SBSC: set auto-refresh mode */
+ mov.l 1f, r4
+ mov.l @r4, r0
+ mov.l 4f, r1
+ and r1, r0
+ mov.l r0, @r4
+ mov.l 6f, r4
+ mov.l 8f, r0
+ mov.l @r4, r1
+ mov #-1, r4
+ add r4, r1
+ or r1, r0
+ mov.l 7f, r1
+ mov.l r0, @r1
+
+ rts
+ nop
+
+ .balign 4
+1: .long 0xfe400008 /* SDCR0 */
+4: .long 0xfffffbff
+6: .long 0xfe40001c /* RTCOR */
+7: .long 0xfe400018 /* RTCNT */
+8: .long 0xa55a0000
+ENTRY(migor_sdram_leave_end)
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 6ed1fd32369..9099b6da995 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
@@ -18,8 +19,6 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_gpio.h>
#include <video/sh_mobile_lcdc.h>
#include <media/sh_mobile_ceu.h>
#include <media/ov772x.h>
@@ -27,7 +26,7 @@
#include <asm/clock.h>
#include <asm/machvec.h>
#include <asm/io.h>
-#include <asm/sh_keysc.h>
+#include <asm/suspend.h>
#include <mach/migor.h>
#include <cpu/sh7722.h>
@@ -390,17 +389,25 @@ static struct platform_device migor_ceu_device = {
},
};
-struct spi_gpio_platform_data sdcard_cn9_platform_data = {
- .sck = GPIO_PTD0,
- .mosi = GPIO_PTD1,
- .miso = GPIO_PTD2,
- .num_chipselect = 1,
+static struct resource sdhi_cn9_resources[] = {
+ [0] = {
+ .name = "SDHI",
+ .start = 0x04ce0000,
+ .end = 0x04ce01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 101,
+ .flags = IORESOURCE_IRQ,
+ },
};
-static struct platform_device sdcard_cn9_device = {
- .name = "spi_gpio",
- .dev = {
- .platform_data = &sdcard_cn9_platform_data,
+static struct platform_device sdhi_cn9_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi_cn9_resources),
+ .resource = sdhi_cn9_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI,
},
};
@@ -467,23 +474,24 @@ static struct platform_device *migor_devices[] __initdata = {
&migor_ceu_device,
&migor_nor_flash_device,
&migor_nand_flash_device,
- &sdcard_cn9_device,
+ &sdhi_cn9_device,
&migor_camera[0],
&migor_camera[1],
};
-static struct spi_board_info migor_spi_devices[] = {
- {
- .modalias = "mmc_spi",
- .max_speed_hz = 5000000,
- .chip_select = 0,
- .controller_data = (void *) GPIO_PTD5,
- },
-};
+extern char migor_sdram_enter_start;
+extern char migor_sdram_enter_end;
+extern char migor_sdram_leave_start;
+extern char migor_sdram_leave_end;
static int __init migor_devices_setup(void)
{
-
+ /* register board specific self-refresh code */
+ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+ &migor_sdram_enter_start,
+ &migor_sdram_enter_end,
+ &migor_sdram_leave_start,
+ &migor_sdram_leave_end);
#ifdef CONFIG_PM
/* Let D11 LED show STATUS0 */
gpio_request(GPIO_FN_STATUS0, NULL);
@@ -525,6 +533,16 @@ static int __init migor_devices_setup(void)
gpio_request(GPIO_PTA1, NULL);
gpio_direction_input(GPIO_PTA1);
+ /* SDHI */
+ gpio_request(GPIO_FN_SDHICD, NULL);
+ gpio_request(GPIO_FN_SDHIWP, NULL);
+ gpio_request(GPIO_FN_SDHID3, NULL);
+ gpio_request(GPIO_FN_SDHID2, NULL);
+ gpio_request(GPIO_FN_SDHID1, NULL);
+ gpio_request(GPIO_FN_SDHID0, NULL);
+ gpio_request(GPIO_FN_SDHICMD, NULL);
+ gpio_request(GPIO_FN_SDHICLK, NULL);
+
/* Touch Panel */
gpio_request(GPIO_FN_IRQ6, NULL);
@@ -612,9 +630,6 @@ static int __init migor_devices_setup(void)
i2c_register_board_info(0, migor_i2c_devices,
ARRAY_SIZE(migor_i2c_devices));
- spi_register_board_info(migor_spi_devices,
- ARRAY_SIZE(migor_spi_devices));
-
return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
}
arch_initcall(migor_devices_setup);
diff --git a/arch/sh/boards/mach-r2d/irq.c b/arch/sh/boards/mach-r2d/irq.c
index c70fecedcac..78d7b27c80d 100644
--- a/arch/sh/boards/mach-r2d/irq.c
+++ b/arch/sh/boards/mach-r2d/irq.c
@@ -116,7 +116,7 @@ static unsigned char irl2irq[R2D_NR_IRL];
int rts7751r2d_irq_demux(int irq)
{
- if (irq >= R2D_NR_IRL || !irl2irq[irq])
+ if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq])
return irq;
return irl2irq[irq];
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 02d21a3e2a8..4eb31acfafe 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -16,15 +16,17 @@
#include <asm/io.h>
#include <mach-se/mach/se7722.h>
+unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, };
+
static void disable_se7722_irq(unsigned int irq)
{
- unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
+ unsigned int bit = (unsigned int)get_irq_chip_data(irq);
ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
}
static void enable_se7722_irq(unsigned int irq)
{
- unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
+ unsigned int bit = (unsigned int)get_irq_chip_data(irq);
ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
}
@@ -38,18 +40,15 @@ static struct irq_chip se7722_irq_chip __read_mostly = {
static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
{
unsigned short intv = ctrl_inw(IRQ01_STS);
- struct irq_desc *ext_desc;
- unsigned int ext_irq = SE7722_FPGA_IRQ_BASE;
+ unsigned int ext_irq = 0;
intv &= (1 << SE7722_FPGA_IRQ_NR) - 1;
- while (intv) {
- if (intv & 1) {
- ext_desc = irq_desc + ext_irq;
- handle_level_irq(ext_irq, ext_desc);
- }
- intv >>= 1;
- ext_irq++;
+ for (; intv; intv >>= 1, ext_irq++) {
+ if (!(intv & 1))
+ continue;
+
+ generic_handle_irq(se7722_fpga_irq[ext_irq]);
}
}
@@ -63,11 +62,18 @@ void __init init_se7722_IRQ(void)
ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
- for (i = 0; i < SE7722_FPGA_IRQ_NR; i++)
- set_irq_chip_and_handler_name(SE7722_FPGA_IRQ_BASE + i,
+ for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
+ se7722_fpga_irq[i] = create_irq();
+ if (se7722_fpga_irq[i] < 0)
+ return;
+
+ set_irq_chip_and_handler_name(se7722_fpga_irq[i],
&se7722_irq_chip,
handle_level_irq, "level");
+ set_irq_chip_data(se7722_fpga_irq[i], (void *)i);
+ }
+
set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c
index 36374078e52..b1cb9425b60 100644
--- a/arch/sh/boards/mach-se/7722/setup.c
+++ b/arch/sh/boards/mach-se/7722/setup.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/smc91x.h>
#include <mach-se/mach/se7722.h>
#include <mach-se/mach/mrshpc.h>
@@ -21,7 +22,6 @@
#include <asm/clock.h>
#include <asm/io.h>
#include <asm/heartbeat.h>
-#include <asm/sh_keysc.h>
#include <cpu/sh7722.h>
/* Heartbeat */
@@ -60,8 +60,7 @@ static struct resource smc91x_eth_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = SMC_IRQ,
- .end = SMC_IRQ,
+ /* Filled in later */
.flags = IORESOURCE_IRQ,
},
};
@@ -90,8 +89,7 @@ static struct resource cf_ide_resources[] = {
.flags = IORESOURCE_IO,
},
[2] = {
- .start = MRSHPC_IRQ0,
- .end = MRSHPC_IRQ0,
+ /* Filled in later */
.flags = IORESOURCE_IRQ,
},
};
@@ -153,6 +151,14 @@ static struct platform_device *se7722_devices[] __initdata = {
static int __init se7722_devices_setup(void)
{
mrshpc_setup_windows();
+
+ /* Wire-up dynamic vectors */
+ cf_ide_resources[2].start = cf_ide_resources[2].end =
+ se7722_fpga_irq[SE7722_FPGA_IRQ_MRSHPC0];
+
+ smc91x_eth_resources[1].start = smc91x_eth_resources[1].end =
+ se7722_fpga_irq[SE7722_FPGA_IRQ_SMC];
+
return platform_add_devices(se7722_devices, ARRAY_SIZE(se7722_devices));
}
device_initcall(se7722_devices_setup);
@@ -193,6 +199,5 @@ static void __init se7722_setup(char **cmdline_p)
static struct sh_machine_vector mv_se7722 __initmv = {
.mv_name = "Solution Engine 7722" ,
.mv_setup = se7722_setup ,
- .mv_nr_irqs = SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_NR,
.mv_init_irq = init_se7722_IRQ,
};
diff --git a/arch/sh/boards/mach-se/7724/Makefile b/arch/sh/boards/mach-se/7724/Makefile
index 349cbd6ce82..a08b36830f0 100644
--- a/arch/sh/boards/mach-se/7724/Makefile
+++ b/arch/sh/boards/mach-se/7724/Makefile
@@ -7,4 +7,4 @@
#
#
-obj-y := setup.o irq.o
\ No newline at end of file
+obj-y := setup.o irq.o sdram.o
diff --git a/arch/sh/boards/mach-se/7724/sdram.S b/arch/sh/boards/mach-se/7724/sdram.S
new file mode 100644
index 00000000000..9040167d502
--- /dev/null
+++ b/arch/sh/boards/mach-se/7724/sdram.S
@@ -0,0 +1,52 @@
+/*
+ * MS7724SE sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(ms7724se_sdram_enter_start)
+
+ /* DBSC: put memory in self-refresh mode */
+
+ ED 0xFD000010, 0x00000000 /* DBEN */
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(ms7724se_sdram_enter_end)
+
+ .balign 4
+ENTRY(ms7724se_sdram_leave_start)
+
+ /* DBSC: put memory in auto-refresh mode */
+
+ ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
+ WAIT 1
+ ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+ ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+ ED 0xFD000010, 0x00000001 /* DBEN */
+ ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+ rts
+ nop
+
+ENTRY(ms7724se_sdram_leave_end)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 0894bba9fad..4b0f0c0dc2b 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -19,6 +19,7 @@
#include <linux/smc91x.h>
#include <linux/gpio.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
#include <video/sh_mobile_lcdc.h>
#include <media/sh_mobile_ceu.h>
@@ -27,7 +28,7 @@
#include <asm/heartbeat.h>
#include <asm/sh_eth.h>
#include <asm/clock.h>
-#include <asm/sh_keysc.h>
+#include <asm/suspend.h>
#include <cpu/sh7724.h>
#include <mach-se/mach/se7724.h>
@@ -451,6 +452,52 @@ static struct platform_device sh7724_usb1_gadget_device = {
.resource = sh7724_usb1_gadget_resources,
};
+static struct resource sdhi0_cn7_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0x04ce0000,
+ .end = 0x04ce01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 101,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi0_cn7_device = {
+ .name = "sh_mobile_sdhi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(sdhi0_cn7_resources),
+ .resource = sdhi0_cn7_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI0,
+ },
+};
+
+static struct resource sdhi1_cn8_resources[] = {
+ [0] = {
+ .name = "SDHI1",
+ .start = 0x04cf0000,
+ .end = 0x04cf01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 24,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi1_cn8_device = {
+ .name = "sh_mobile_sdhi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(sdhi1_cn8_resources),
+ .resource = sdhi1_cn8_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_SDHI1,
+ },
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
@@ -463,6 +510,8 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&sh7724_usb0_host_device,
&sh7724_usb1_gadget_device,
&fsi_device,
+ &sdhi0_cn7_device,
+ &sdhi1_cn8_device,
};
#define EEPROM_OP 0xBA206000
@@ -487,7 +536,7 @@ static int __init sh_eth_is_eeprom_ready(void)
static void __init sh_eth_init(void)
{
int i;
- u16 mac[3];
+ u16 mac;
/* check EEPROM status */
if (!sh_eth_is_eeprom_ready())
@@ -501,16 +550,10 @@ static void __init sh_eth_init(void)
if (!sh_eth_is_eeprom_ready())
return;
- mac[i] = ctrl_inw(EEPROM_DATA);
- mac[i] = ((mac[i] & 0xFF) << 8) | (mac[i] >> 8); /* swap */
+ mac = ctrl_inw(EEPROM_DATA);
+ sh_eth_plat.mac_addr[i << 1] = mac & 0xff;
+ sh_eth_plat.mac_addr[(i << 1) + 1] = mac >> 8;
}
-
- /* reset sh-eth */
- ctrl_outl(0x1, SH_ETH_ADDR + 0x0);
-
- /* set MAC addr */
- ctrl_outl(((mac[0] << 16) | (mac[1])), SH_ETH_MAHR);
- ctrl_outl((mac[2]), SH_ETH_MALR);
}
#define SW4140 0xBA201000
@@ -527,11 +570,22 @@ static void __init sh_eth_init(void)
#define SW41_G 0x4000
#define SW41_H 0x8000
+extern char ms7724se_sdram_enter_start;
+extern char ms7724se_sdram_enter_end;
+extern char ms7724se_sdram_leave_start;
+extern char ms7724se_sdram_leave_end;
+
static int __init devices_setup(void)
{
u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
struct clk *fsia_clk;
+ /* register board specific self-refresh code */
+ sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+ &ms7724se_sdram_enter_start,
+ &ms7724se_sdram_enter_end,
+ &ms7724se_sdram_leave_start,
+ &ms7724se_sdram_leave_end);
/* Reset Release */
ctrl_outw(ctrl_inw(FPGA_OUT) &
~((1 << 1) | /* LAN */
@@ -701,6 +755,26 @@ static int __init devices_setup(void)
clk_set_rate(&fsimcka_clk, 11000);
clk_put(fsia_clk);
+ /* SDHI0 connected to cn7 */
+ gpio_request(GPIO_FN_SDHI0CD, NULL);
+ gpio_request(GPIO_FN_SDHI0WP, NULL);
+ gpio_request(GPIO_FN_SDHI0D3, NULL);
+ gpio_request(GPIO_FN_SDHI0D2, NULL);
+ gpio_request(GPIO_FN_SDHI0D1, NULL);
+ gpio_request(GPIO_FN_SDHI0D0, NULL);
+ gpio_request(GPIO_FN_SDHI0CMD, NULL);
+ gpio_request(GPIO_FN_SDHI0CLK, NULL);
+
+ /* SDHI1 connected to cn8 */
+ gpio_request(GPIO_FN_SDHI1CD, NULL);
+ gpio_request(GPIO_FN_SDHI1WP, NULL);
+ gpio_request(GPIO_FN_SDHI1D3, NULL);
+ gpio_request(GPIO_FN_SDHI1D2, NULL);
+ gpio_request(GPIO_FN_SDHI1D1, NULL);
+ gpio_request(GPIO_FN_SDHI1D0, NULL);
+ gpio_request(GPIO_FN_SDHI1CMD, NULL);
+ gpio_request(GPIO_FN_SDHI1CLK, NULL);
+
/*
* enable SH-Eth
*
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index fd56a71ca9d..b51b1fc4baa 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -131,7 +131,7 @@ void decompress_kernel(void)
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
- output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
+ output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#ifdef CONFIG_29BIT
output_addr |= P2SEG;
#endif
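The decompressor switches from the SH-specific PHYSADDR() mask to the generic __pa() helper (the macro itself is deleted in the addrspace.h hunk further below). For reference, a stand-alone sketch of what the removed macro computed, using a hypothetical P1-segment address:

	#include <stdio.h>

	/* the removed arch/sh macro: keep the low 29 address bits */
	#define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)

	int main(void)
	{
		unsigned long text = 0x80210000UL;	/* hypothetical P1-segment _text */

		printf("PHYSADDR(0x%08lx) = 0x%08lx\n", text, PHYSADDR(text));
		return 0;
	}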
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
index 5806eee84f6..f473a24a2d9 100644
--- a/arch/sh/boot/romimage/Makefile
+++ b/arch/sh/boot/romimage/Makefile
@@ -4,16 +4,22 @@
# create an image suitable for burning to flash from zImage
#
-targets := vmlinux head.o
+targets := vmlinux head.o zeropage.bin piggy.o
OBJECTS = $(obj)/head.o
-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext 0 -e romstart
+LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext 0 -e romstart \
+ -T $(obj)/../../kernel/vmlinux.lds
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
$(call if_changed,ld)
@:
+OBJCOPYFLAGS += -j .empty_zero_page
+
+$(obj)/zeropage.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr arch/sh/boot/zImage FORCE
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
$(call if_changed,ld)
diff --git a/arch/sh/boot/romimage/head.S b/arch/sh/boot/romimage/head.S
index 219bc626dd7..93e779a405e 100644
--- a/arch/sh/boot/romimage/head.S
+++ b/arch/sh/boot/romimage/head.S
@@ -5,6 +5,44 @@
*/
.text
+ #include <asm/page.h>
+
.global romstart
romstart:
+ /* include board specific setup code */
#include <mach/romimage.h>
+
+ /* copy the empty_zero_page contents to where vmlinux expects it */
+ mova empty_zero_page_src, r0
+ mov.l empty_zero_page_dst, r1
+ mov #(PAGE_SHIFT - 4), r4
+ mov #1, r3
+ shld r4, r3 /* r3 = PAGE_SIZE / 16 */
+
+1:
+ mov.l @r0, r4
+ mov.l @(4, r0), r5
+ mov.l @(8, r0), r6
+ mov.l @(12, r0), r7
+ add #16,r0
+ mov.l r4, @r1
+ mov.l r5, @(4, r1)
+ mov.l r6, @(8, r1)
+ mov.l r7, @(12, r1)
+ dt r3
+ add #16,r1
+ bf 1b
+
+ /* jump to the zImage entry point located after the zero page data */
+ mov #PAGE_SHIFT, r4
+ mov #1, r1
+ shld r4, r1
+ mova empty_zero_page_src, r0
+ add r1, r0
+ jmp @r0
+ nop
+
+ .align 2
+empty_zero_page_dst:
+ .long _text
+empty_zero_page_src:
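For reference, the copy loop in romstart above moves one page in 16-byte chunks: r3 counts PAGE_SIZE / 16 iterations, with four word loads and four word stores per pass. A small stand-alone C equivalent of the same structure, assuming a 4096-byte page for the example (illustration only, not part of the patch):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096	/* assumed page size for the sketch */

	static void copy_page_16(uint32_t *dst, const uint32_t *src)
	{
		unsigned int n = PAGE_SIZE / 16;	/* r3 in the assembly */

		while (n--) {
			/* four word copies per iteration, as in the asm loop */
			dst[0] = src[0];
			dst[1] = src[1];
			dst[2] = src[2];
			dst[3] = src[3];
			src += 4;
			dst += 4;
		}
	}

	int main(void)
	{
		static uint32_t src[PAGE_SIZE / 4], dst[PAGE_SIZE / 4];

		src[0] = 0xa55a0000;	/* arbitrary test pattern */
		copy_page_16(dst, src);
		printf("%s\n", memcmp(src, dst, PAGE_SIZE) ? "mismatch" : "copied");
		return 0;
	}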
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 347ee11351e..1ee631d3725 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/string.h>
#include <asm/dma.h>
@@ -21,7 +20,6 @@
static struct sysdev_class dma_sysclass = {
.name = "dma",
};
-EXPORT_SYMBOL(dma_sysclass);
static ssize_t dma_show_devices(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
diff --git a/arch/sh/drivers/pci/Kconfig b/arch/sh/drivers/pci/Kconfig
deleted file mode 100644
index e8db585a663..00000000000
--- a/arch/sh/drivers/pci/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-config PCI
- bool "PCI support"
- depends on SYS_SUPPORTS_PCI
- help
- Find out whether you have a PCI motherboard. PCI is the name of a
- bus system, i.e. the way the CPU talks to the other stuff inside
- your box. If you have PCI, say Y, otherwise N.
-
-config SH_PCIDMA_NONCOHERENT
- bool "Cache and PCI noncoherent"
- depends on PCI
- default y
- help
- Enable this option if your platform does not have a CPU cache which
- remains coherent with PCI DMA. It is safest to say 'Y', although you
- will see better performance if you can say 'N', because the PCI DMA
- code will not have to flush the CPU's caches. If you have a PCI host
- bridge integrated with your SH CPU, refer carefully to the chip specs
- to see if you can say 'N' here. Otherwise, leave it as 'Y'.
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 80d40813e05..99d6b3ecbe2 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,9 +28,6 @@
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
-/* Returns the physical address of a PnSEG (n=1,2) address */
-#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
-
#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
/*
* Map an address to a certain privileged segment
@@ -60,5 +57,11 @@
#define P3_ADDR_MAX P4SEG
#endif
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PMB
+extern int __in_29bit_mode(void);
+#endif /* CONFIG_PMB */
+#endif /* __ASSEMBLY__ */
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index e8e78137c6f..b16388d7195 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -78,11 +78,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-/* Atomic operations are already serializing on SH */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic-long.h>
#include <asm-generic/atomic64.h>
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index ebe595b7ab1..98511e4d28c 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -26,8 +26,8 @@
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 46260fcbdf4..02a19a1c033 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -14,11 +14,15 @@
#include <asm/processor.h>
+extern void select_idle_routine(void);
+
static void __init check_bugs(void)
{
extern unsigned long loops_per_jiffy;
char *p = &init_utsname()->machine[2]; /* "sh" */
+ select_idle_routine();
+
current_cpu_data.loops_per_jiffy = loops_per_jiffy;
switch (current_cpu_data.family) {
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 69d56dd4c96..87ced133a36 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -1,219 +1,108 @@
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
+extern struct dma_map_ops *dma_ops;
+extern void no_iommu_init(void);
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return dma_ops;
+}
+
#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
-extern struct bus_type pci_bus_type;
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
-#define dma_supported(dev, mask) (1)
+ return 1;
+}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
*dev->dma_mask = mask;
return 0;
}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-static inline dma_addr_t dma_map_single(struct device *dev,
- void *ptr, size_t size,
- enum dma_data_direction dir)
-{
- dma_addr_t addr = virt_to_phys(ptr);
-
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- if (dev->bus == &pci_bus_type)
- return addr;
-#endif
- dma_cache_sync(dev, ptr, size, dir);
-
- debug_dma_map_page(dev, virt_to_page(ptr),
- (unsigned long)ptr & ~PAGE_MASK, size,
- dir, addr, true);
-
- return addr;
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nents; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
+#ifdef CONFIG_DMA_COHERENT
+#define dma_is_consistent(d, h) (1)
+#else
+#define dma_is_consistent(d, h) (0)
#endif
- sg[i].dma_address = sg_phys(&sg[i]);
- sg[i].dma_length = sg[i].length;
- }
- debug_dma_map_sg(dev, sg, nents, i, dir);
-
- return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- debug_dma_unmap_sg(dev, sg, nents, dir);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction dir)
-{
- dma_unmap_single(dev, dma_address, size, dir);
-}
-
-static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
+static inline int dma_get_cache_alignment(void)
{
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- if (dev->bus == &pci_bus_type)
- return;
-#endif
- dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
+ /*
+ * Each processor family will define its own L1_CACHE_SHIFT,
+ * L1_CACHE_BYTES wraps to this, so this is always safe.
+ */
+ return L1_CACHE_BYTES;
}
-static inline void dma_sync_single_range(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
-#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- if (dev->bus == &pci_bus_type)
- return;
-#endif
- dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
-}
+ struct dma_map_ops *ops = get_dma_ops(dev);
-static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- int i;
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
- for (i = 0; i < nelems; i++) {
-#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
-#endif
- sg[i].dma_address = sg_phys(&sg[i]);
- sg[i].dma_length = sg[i].length;
- }
+ return dma_addr == 0;
}
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
{
- __dma_sync_single(dev, dma_handle, size, dir);
- debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction dir)
-{
- __dma_sync_single(dev, dma_handle, size, dir);
- debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
-}
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+ return memory;
+ if (!ops->alloc_coherent)
+ return NULL;
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
- debug_dma_sync_single_range_for_cpu(dev, dma_handle,
- offset, size, direction);
-}
+ memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
- debug_dma_sync_single_range_for_device(dev, dma_handle,
- offset, size, direction);
+ return memory;
}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
- __dma_sync_sg(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
+ struct dma_map_ops *ops = get_dma_ops(dev);
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
-{
- __dma_sync_sg(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-}
+ WARN_ON(irqs_disabled()); /* for portability */
-static inline int dma_get_cache_alignment(void)
-{
- /*
- * Each processor family will define its own L1_CACHE_SHIFT,
- * L1_CACHE_BYTES wraps to this, so this is always safe.
- */
- return L1_CACHE_BYTES;
-}
+ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+ return;
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == 0;
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+ if (ops->free_coherent)
+ ops->free_coherent(dev, size, vaddr, dma_handle);
}
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
#endif /* __ASM_SH_DMA_MAPPING_H */
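The rewritten header replaces the open-coded mapping helpers with dispatch through a per-device struct dma_map_ops, falling back to generic behaviour when a hook is absent: dma_supported() defaults to 1, and dma_alloc_coherent() returns NULL when no alloc_coherent hook is installed. A small user-space C sketch of that ops-table pattern follows; every name in it is invented for the illustration and is not a kernel API:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	struct demo_dev;

	/* per-device operations table, analogous to struct dma_map_ops */
	struct demo_map_ops {
		void *(*alloc_coherent)(struct demo_dev *dev, size_t size);
		int (*dma_supported)(struct demo_dev *dev, unsigned long long mask);
	};

	struct demo_dev {
		const struct demo_map_ops *ops;
	};

	static void *generic_alloc(struct demo_dev *dev, size_t size)
	{
		(void)dev;
		return calloc(1, size);
	}

	static const struct demo_map_ops nommu_ops = {
		.alloc_coherent	= generic_alloc,
		/* .dma_supported left NULL: caller falls back to "supported" */
	};

	static void *demo_alloc_coherent(struct demo_dev *dev, size_t size)
	{
		if (!dev->ops || !dev->ops->alloc_coherent)
			return NULL;	/* no hook, no allocation */
		return dev->ops->alloc_coherent(dev, size);
	}

	static int demo_dma_supported(struct demo_dev *dev, unsigned long long mask)
	{
		if (dev->ops && dev->ops->dma_supported)
			return dev->ops->dma_supported(dev, mask);
		return 1;	/* mirrors the header's default */
	}

	int main(void)
	{
		struct demo_dev dev = { .ops = &nommu_ops };
		void *buf = demo_alloc_coherent(&dev, 256);

		printf("supported=%d buf=%p\n",
		       demo_dma_supported(&dev, ~0ULL), buf);
		free(buf);
		return 0;
	}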
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index ced6795891a..bdccbbfdc0b 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -194,6 +194,12 @@
#define DWARF_ARCH_RA_REG 17
#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
/*
* Read either the frame pointer (r14) or the stack pointer (r15).
* NOTE: this MUST be inlined.
@@ -241,6 +247,12 @@ struct dwarf_cie {
unsigned long flags;
#define DWARF_CIE_Z_AUGMENTATION (1 << 0)
+
+ /*
+ * 'mod' will be non-NULL if this CIE came from a module's
+ * .eh_frame section.
+ */
+ struct module *mod;
};
/**
@@ -255,6 +267,12 @@ struct dwarf_fde {
unsigned char *instructions;
unsigned char *end;
struct list_head link;
+
+ /*
+ * 'mod' will be non-NULL if this FDE came from a module's
+ * .eh_frame section.
+ */
+ struct module *mod;
};
/**
@@ -364,6 +382,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)
extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
struct dwarf_frame *);
+extern void dwarf_free_frame(struct dwarf_frame *);
+
+extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+ struct module *);
+extern void module_dwarf_cleanup(struct module *);
+
#endif /* !__ASSEMBLY__ */
#define CFI_STARTPROC .cfi_startproc
@@ -391,6 +415,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
static inline void dwarf_unwinder_init(void)
{
}
+
+#define module_dwarf_finalize(hdr, sechdrs, me) (0)
+#define module_dwarf_cleanup(mod) do { } while (0)
+
#endif
#endif /* CONFIG_DWARF_UNWINDER */
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 721fcc4d5e9..5ac1e40a511 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -14,9 +14,9 @@
#define _ASM_FIXMAP_H
#include <linux/kernel.h>
+#include <linux/threads.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
@@ -46,9 +46,15 @@
* fix-mapped?
*/
enum fixed_addresses {
-#define FIX_N_COLOURS 16
+ /*
+ * The FIX_CMAP entries are used by kmap_coherent() to get virtual
+ * addresses which are of a known color, and so their values are
+ * important. __fix_to_virt(FIX_CMAP_END - n) must give an address
+ * which is the same color as a page (n<<PAGE_SHIFT).
+ */
+#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
- FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+ FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
FIX_UNCACHED,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
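The FIX_CMAP comment above states the contract the fixmap layout has to satisfy: slot FIX_CMAP_END - n must map to a virtual address of the same cache color as physical page n. A small sketch of that color arithmetic (illustrative helpers, not the actual kmap_coherent() code):

#include <asm/fixmap.h>

/* Cache color of a page frame: low bits of the PFN modulo FIX_N_COLOURS. */
static inline unsigned int example_cache_color(unsigned long pfn)
{
	return pfn & (FIX_N_COLOURS - 1);
}

/* Fixmap slot whose __fix_to_virt() address shares that page's color. */
static inline unsigned int example_cmap_slot(unsigned long pfn)
{
	return FIX_CMAP_END - example_cache_color(pfn);
}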
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 1d3aee04b5c..fb6bbb9b1cc 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -18,16 +18,15 @@ static inline void grab_fpu(struct pt_regs *regs)
struct task_struct;
-extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
+extern void save_fpu(struct task_struct *__tsk);
+void fpu_state_restore(struct pt_regs *regs);
#else
+#define save_fpu(tsk) do { } while (0)
#define release_fpu(regs) do { } while (0)
#define grab_fpu(regs) do { } while (0)
+#define fpu_state_restore(regs) do { } while (0)
-static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
-{
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
-}
#endif
struct user_regset;
@@ -39,19 +38,28 @@ extern int fpregs_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf);
+static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
+{
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
+ save_fpu(tsk);
+ release_fpu(regs);
+ } else
+ tsk->fpu_counter = 0;
+}
+
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
- if (test_tsk_thread_flag(tsk, TIF_USEDFPU))
- save_fpu(tsk, regs);
+ __unlazy_fpu(tsk, regs);
preempt_enable();
}
static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
- if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) {
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
release_fpu(regs);
}
preempt_enable();
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 12f3a31f20a..13e9966464c 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
+#ifndef __ASSEMBLY__
+
+/* arch/sh/kernel/return_address.c */
+extern void *return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* __ASSEMBLY__ */
+
#endif /* __ASM_SH_FTRACE_H */
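CALLER_ADDR0 stays the compiler builtin, while the deeper levels now go through the new return_address() helper in arch/sh/kernel/return_address.c. A trivial illustrative use (the message text is arbitrary):

#include <linux/kernel.h>
#include <asm/ftrace.h>

static void example_report_caller(void)
{
	/* CALLER_ADDR0 is the immediate caller, CALLER_ADDR1 one frame above. */
	pr_info("called from %pS (via %pS)\n",
		(void *)CALLER_ADDR0, (void *)CALLER_ADDR1);
}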
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index 61f93da2c62..f8d9a731e90 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -20,7 +20,7 @@
#endif
#define ARCH_NR_GPIOS 512
-#include <asm-generic/gpio.h>
+#include <linux/sh_pfc.h>
#ifdef CONFIG_GPIOLIB
@@ -53,84 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* CONFIG_GPIOLIB */
-typedef unsigned short pinmux_enum_t;
-typedef unsigned short pinmux_flag_t;
-
-#define PINMUX_TYPE_NONE 0
-#define PINMUX_TYPE_FUNCTION 1
-#define PINMUX_TYPE_GPIO 2
-#define PINMUX_TYPE_OUTPUT 3
-#define PINMUX_TYPE_INPUT 4
-#define PINMUX_TYPE_INPUT_PULLUP 5
-#define PINMUX_TYPE_INPUT_PULLDOWN 6
-
-#define PINMUX_FLAG_TYPE (0x7)
-#define PINMUX_FLAG_WANT_PULLUP (1 << 3)
-#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4)
-
-#define PINMUX_FLAG_DBIT_SHIFT 5
-#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
-#define PINMUX_FLAG_DREG_SHIFT 10
-#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
-
-struct pinmux_gpio {
- pinmux_enum_t enum_id;
- pinmux_flag_t flags;
-};
-
-#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
-#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
-
-struct pinmux_cfg_reg {
- unsigned long reg, reg_width, field_width;
- unsigned long *cnt;
- pinmux_enum_t *enum_ids;
-};
-
-#define PINMUX_CFG_REG(name, r, r_width, f_width) \
- .reg = r, .reg_width = r_width, .field_width = f_width, \
- .cnt = (unsigned long [r_width / f_width]) {}, \
- .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
-
-struct pinmux_data_reg {
- unsigned long reg, reg_width, reg_shadow;
- pinmux_enum_t *enum_ids;
-};
-
-#define PINMUX_DATA_REG(name, r, r_width) \
- .reg = r, .reg_width = r_width, \
- .enum_ids = (pinmux_enum_t [r_width]) \
-
-struct pinmux_range {
- pinmux_enum_t begin;
- pinmux_enum_t end;
- pinmux_enum_t force;
-};
-
-struct pinmux_info {
- char *name;
- pinmux_enum_t reserved_id;
- struct pinmux_range data;
- struct pinmux_range input;
- struct pinmux_range input_pd;
- struct pinmux_range input_pu;
- struct pinmux_range output;
- struct pinmux_range mark;
- struct pinmux_range function;
-
- unsigned first_gpio, last_gpio;
-
- struct pinmux_gpio *gpios;
- struct pinmux_cfg_reg *cfg_regs;
- struct pinmux_data_reg *data_regs;
-
- pinmux_enum_t *gpio_data;
- unsigned int gpio_data_size;
-
- unsigned long *gpio_in_use;
- struct gpio_chip chip;
-};
-
-int register_pinmux(struct pinmux_info *pip);
-
#endif /* __ASM_SH_GPIO_H */
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h
index a5be4afa790..48b191313a9 100644
--- a/arch/sh/include/asm/hardirq.h
+++ b/arch/sh/include/asm/hardirq.h
@@ -1,9 +1,16 @@
#ifndef __ASM_SH_HARDIRQ_H
#define __ASM_SH_HARDIRQ_H
-extern void ack_bad_irq(unsigned int irq);
-#define ack_bad_irq ack_bad_irq
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+} ____cacheline_aligned irq_cpustat_t;
-#include <asm-generic/hardirq.h>
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+extern void ack_bad_irq(unsigned int irq);
#endif /* __ASM_SH_HARDIRQ_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 5be45ea4dfe..512cd3e9d0c 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -90,15 +90,11 @@
#define ctrl_outl __raw_writel
#define ctrl_outq __raw_writeq
+extern unsigned long generic_io_base;
+
static inline void ctrl_delay(void)
{
-#ifdef CONFIG_CPU_SH4
- __raw_readw(CCN_PVR);
-#elif defined(P2SEG)
- __raw_readw(P2SEG);
-#else
-#error "Need a dummy address for delay"
-#endif
+ __raw_readw(generic_io_base);
}
#define __BUILD_MEMORY_STRING(bwlq, type) \
@@ -186,8 +182,6 @@ __BUILD_MEMORY_STRING(q, u64)
#define IO_SPACE_LIMIT 0xffffffff
-extern unsigned long generic_io_base;
-
/*
* This function provides a method for the generic case where a
* board-specific ioport_map simply needs to return the port + some
@@ -246,7 +240,7 @@ void __iounmap(void __iomem *addr);
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
unsigned long last_addr = offset + size - 1;
#endif
void __iomem *ret;
@@ -255,7 +249,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
if (ret)
return ret;
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses are done through P2.
diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h
index 46e71da5be6..a741153b41c 100644
--- a/arch/sh/include/asm/irqflags.h
+++ b/arch/sh/include/asm/irqflags.h
@@ -1,34 +1,9 @@
#ifndef __ASM_SH_IRQFLAGS_H
#define __ASM_SH_IRQFLAGS_H
-#ifdef CONFIG_SUPERH32
-#include "irqflags_32.h"
-#else
-#include "irqflags_64.h"
-#endif
+#define RAW_IRQ_DISABLED 0xf0
+#define RAW_IRQ_ENABLED 0x00
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return (flags != 0);
-}
-
-static inline int raw_irqs_disabled(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- return raw_irqs_disabled_flags(flags);
-}
-
-#define raw_local_irq_save(flags) \
- do { (flags) = __raw_local_irq_save(); } while (0)
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
- if ((flags & 0xf0) != 0xf0)
- raw_local_irq_enable();
-}
+#include <asm-generic/irqflags.h>
#endif /* __ASM_SH_IRQFLAGS_H */
diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h
deleted file mode 100644
index 60218f54134..00000000000
--- a/arch/sh/include/asm/irqflags_32.h
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef __ASM_SH_IRQFLAGS_32_H
-#define __ASM_SH_IRQFLAGS_32_H
-
-static inline void raw_local_irq_enable(void)
-{
- unsigned long __dummy0, __dummy1;
-
- __asm__ __volatile__ (
- "stc sr, %0\n\t"
- "and %1, %0\n\t"
-#ifdef CONFIG_CPU_HAS_SR_RB
- "stc r6_bank, %1\n\t"
- "or %1, %0\n\t"
-#endif
- "ldc %0, sr\n\t"
- : "=&r" (__dummy0), "=r" (__dummy1)
- : "1" (~0x000000f0)
- : "memory"
- );
-}
-
-static inline void raw_local_irq_disable(void)
-{
- unsigned long flags;
-
- __asm__ __volatile__ (
- "stc sr, %0\n\t"
- "or #0xf0, %0\n\t"
- "ldc %0, sr\n\t"
- : "=&z" (flags)
- : /* no inputs */
- : "memory"
- );
-}
-
-static inline void set_bl_bit(void)
-{
- unsigned long __dummy0, __dummy1;
-
- __asm__ __volatile__ (
- "stc sr, %0\n\t"
- "or %2, %0\n\t"
- "and %3, %0\n\t"
- "ldc %0, sr\n\t"
- : "=&r" (__dummy0), "=r" (__dummy1)
- : "r" (0x10000000), "r" (0xffffff0f)
- : "memory"
- );
-}
-
-static inline void clear_bl_bit(void)
-{
- unsigned long __dummy0, __dummy1;
-
- __asm__ __volatile__ (
- "stc sr, %0\n\t"
- "and %2, %0\n\t"
- "ldc %0, sr\n\t"
- : "=&r" (__dummy0), "=r" (__dummy1)
- : "1" (~0x10000000)
- : "memory"
- );
-}
-
-static inline unsigned long __raw_local_save_flags(void)
-{
- unsigned long flags;
-
- __asm__ __volatile__ (
- "stc sr, %0\n\t"
- "and #0xf0, %0\n\t"
- : "=&z" (flags)
- : /* no inputs */
- : "memory"
- );
-
- return flags;
-}
-
-static inline unsigned long __raw_local_irq_save(void)
-{
- unsigned long flags, __dummy;
-
- __asm__ __volatile__ (
- "stc sr, %1\n\t"
- "mov %1, %0\n\t"
- "or #0xf0, %0\n\t"
- "ldc %0, sr\n\t"
- "mov %1, %0\n\t"
- "and #0xf0, %0\n\t"
- : "=&z" (flags), "=&r" (__dummy)
- : /* no inputs */
- : "memory"
- );
-
- return flags;
-}
-
-#endif /* __ASM_SH_IRQFLAGS_32_H */
diff --git a/arch/sh/include/asm/irqflags_64.h b/arch/sh/include/asm/irqflags_64.h
deleted file mode 100644
index 88f65222c1d..00000000000
--- a/arch/sh/include/asm/irqflags_64.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef __ASM_SH_IRQFLAGS_64_H
-#define __ASM_SH_IRQFLAGS_64_H
-
-#include <cpu/registers.h>
-
-#define SR_MASK_LL 0x00000000000000f0LL
-#define SR_BL_LL 0x0000000010000000LL
-
-static inline void raw_local_irq_enable(void)
-{
- unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
-
-static inline void raw_local_irq_disable(void)
-{
- unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
-
-static inline void set_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-
-}
-
-static inline void clear_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
-
-static inline unsigned long __raw_local_save_flags(void)
-{
- unsigned long long __dummy = SR_MASK_LL;
- unsigned long flags;
-
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "and %0, %1, %0"
- : "=&r" (flags)
- : "r" (__dummy));
-
- return flags;
-}
-
-static inline unsigned long __raw_local_irq_save(void)
-{
- unsigned long long __dummy0, __dummy1 = SR_MASK_LL;
- unsigned long flags;
-
- __asm__ __volatile__ (
- "getcon " __SR ", %1\n\t"
- "or %1, r63, %0\n\t"
- "or %1, %2, %1\n\t"
- "putcon %1, " __SR "\n\t"
- "and %0, %2, %0"
- : "=&r" (flags), "=&r" (__dummy0)
- : "r" (__dummy1));
-
- return flags;
-}
-
-#endif /* __ASM_SH_IRQFLAGS_64_H */
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index f5963037c9d..c7426ad9926 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -7,12 +7,16 @@
#define PMB_PASCR 0xff000070
#define PMB_IRMCR 0xff000078
+#define PASCR_SE 0x80000000
+
#define PMB_ADDR 0xf6100000
#define PMB_DATA 0xf7100000
#define PMB_ENTRY_MAX 16
#define PMB_E_MASK 0x0000000f
#define PMB_E_SHIFT 8
+#define PMB_PFN_MASK 0xff000000
+
#define PMB_SZ_16M 0x00000000
#define PMB_SZ_64M 0x00000010
#define PMB_SZ_128M 0x00000080
@@ -62,17 +66,10 @@ struct pmb_entry {
};
/* arch/sh/mm/pmb.c */
-int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
- unsigned long flags, int *entry);
-int set_pmb_entry(struct pmb_entry *pmbe);
-void clear_pmb_entry(struct pmb_entry *pmbe);
-struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
- unsigned long flags);
-void pmb_free(struct pmb_entry *pmbe);
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags);
void pmb_unmap(unsigned long addr);
+int pmb_init(void);
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */
-
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 4163950cd1c..67f3999b544 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -3,8 +3,6 @@
#ifdef __KERNEL__
-#include <linux/dma-mapping.h>
-
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
@@ -54,30 +52,18 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
-#define PCI_DMA_BUS_IS_PHYS (1)
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include <linux/string.h>
-#include <asm/io.h>
+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
/* pci_unmap_{single,page} being a nop depends upon the
* configuration.
*/
-#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
+#ifdef CONFIG_DMA_NONCOHERENT
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
index 11a302297ab..3d0c9f36d15 100644
--- a/arch/sh/include/asm/perf_event.h
+++ b/arch/sh/include/asm/perf_event.h
@@ -1,8 +1,35 @@
#ifndef __ASM_SH_PERF_EVENT_H
#define __ASM_SH_PERF_EVENT_H
-/* SH only supports software events through this interface. */
-static inline void set_perf_event_pending(void) {}
+struct hw_perf_event;
+
+#define MAX_HWEVENTS 2
+
+struct sh_pmu {
+ const char *name;
+ unsigned int num_events;
+ void (*disable_all)(void);
+ void (*enable_all)(void);
+ void (*enable)(struct hw_perf_event *, int);
+ void (*disable)(struct hw_perf_event *, int);
+ u64 (*read)(int);
+ int (*event_map)(int);
+ unsigned int max_events;
+ unsigned long raw_event_mask;
+ const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+/* arch/sh/kernel/perf_event.c */
+extern int register_sh_pmu(struct sh_pmu *);
+extern int reserve_pmc_hardware(void);
+extern void release_pmc_hardware(void);
+
+static inline void set_perf_event_pending(void)
+{
+ /* Nothing to see here, move along. */
+}
#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 4f3efa7d5a6..ba3046e4f06 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -75,13 +75,31 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
-#ifdef CONFIG_32BIT
-#define PHYS_ADDR_MASK 0xffffffff
+#define PHYS_ADDR_MASK29 0x1fffffff
+#define PHYS_ADDR_MASK32 0xffffffff
+
+#ifdef CONFIG_PMB
+static inline unsigned long phys_addr_mask(void)
+{
+ /* Is the MMU in 29bit mode? */
+ if (__in_29bit_mode())
+ return PHYS_ADDR_MASK29;
+
+ return PHYS_ADDR_MASK32;
+}
+#elif defined(CONFIG_32BIT)
+static inline unsigned long phys_addr_mask(void)
+{
+ return PHYS_ADDR_MASK32;
+}
#else
-#define PHYS_ADDR_MASK 0x1fffffff
+static inline unsigned long phys_addr_mask(void)
+{
+ return PHYS_ADDR_MASK29;
+}
#endif
-#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK)
+#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
#ifdef CONFIG_SUPERH32
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index c0d359ce337..b3543551620 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif
-#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS))
+#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
/* Hardware flags, page size encoding */
#if !defined(CONFIG_MMU)
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 9a8714945dc..1f3d6fab660 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -56,6 +56,7 @@ asmlinkage void __init sh_cpu_init(void);
#define SR_DSP 0x00001000
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
+#define SR_MD 0x40000000
/*
* DSP structure and data
@@ -136,7 +137,7 @@ struct mm_struct;
extern void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
+void prepare_to_copy(struct task_struct *tsk);
/*
* create a kernel thread without removing it from tasklists
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 327cc2e4c97..e38d1d4c7f6 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SH_SCATTERLIST_H
#define __ASM_SH_SCATTERLIST_H
-#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
+#define ISA_DMA_THRESHOLD phys_addr_mask()
#include <asm-generic/scatterlist.h>
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index 5c8ea28ff7a..fe9c2a1ad04 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -2,6 +2,7 @@
#define _ASM_SH_SUSPEND_H
#ifndef __ASSEMBLY__
+#include <linux/notifier.h>
static inline int arch_prepare_suspend(void) { return 0; }
#include <asm/ptrace.h>
@@ -19,6 +20,69 @@ void sh_mobile_setup_cpuidle(void);
static inline void sh_mobile_setup_cpuidle(void) {}
#endif
+/* notifier chains for pre/post sleep hooks */
+extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list;
+extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list;
+
+/* priority levels for notifiers */
+#define SH_MOBILE_SLEEP_BOARD 0
+#define SH_MOBILE_SLEEP_CPU 1
+#define SH_MOBILE_PRE(x) (x)
+#define SH_MOBILE_POST(x) (-(x))
+
+/* board code registration function for self-refresh assembly snippets */
+void sh_mobile_register_self_refresh(unsigned long flags,
+ void *pre_start, void *pre_end,
+ void *post_start, void *post_end);
+
+/* register structure for address/data information */
+struct sh_sleep_regs {
+ unsigned long stbcr;
+ unsigned long bar;
+
+ /* MMU */
+ unsigned long pteh;
+ unsigned long ptel;
+ unsigned long ttb;
+ unsigned long tea;
+ unsigned long mmucr;
+ unsigned long ptea;
+ unsigned long pascr;
+ unsigned long irmcr;
+
+ /* Cache */
+ unsigned long ccr;
+ unsigned long ramcr;
+};
+
+/* data area for low-level sleep code */
+struct sh_sleep_data {
+ /* current sleep mode (SUSP_SH_...) */
+ unsigned long mode;
+
+ /* addresses of board specific self-refresh snippets */
+ unsigned long sf_pre;
+ unsigned long sf_post;
+
+ /* address of resume code */
+ unsigned long resume;
+
+ /* register state saved and restored by the assembly code */
+ unsigned long vbr;
+ unsigned long spc;
+ unsigned long sr;
+ unsigned long sp;
+
+ /* structure for keeping register addresses */
+ struct sh_sleep_regs addr;
+
+ /* structure for saving/restoring register state */
+ struct sh_sleep_regs data;
+};
+
+/* a bitmap of supported sleep modes (SUSP_SH..) */
+extern unsigned long sh_mobile_sleep_supported;
+
#endif
/* flags passed to assembly suspend code */
@@ -27,5 +91,6 @@ static inline void sh_mobile_setup_cpuidle(void) {}
#define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */
#define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */
#define SUSP_SH_SF (1 << 4) /* Enable self-refresh */
+#define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */
#endif /* _ASM_SH_SUSPEND_H */
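Board code attaches to the new pre/post sleep notifier chains with ordinary atomic notifier blocks; SH_MOBILE_PRE()/SH_MOBILE_POST() turn the BOARD/CPU level into a priority so hooks run in the intended order. A minimal registration sketch (the callback body is illustrative, and the notifier arguments are whatever the sleep entry path passes):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/suspend.h>

static int example_board_pre_sleep(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	/* Board-specific work before entering the sleep mode goes here. */
	return NOTIFY_DONE;
}

static struct notifier_block example_board_pre_sleep_nb = {
	.notifier_call	= example_board_pre_sleep,
	.priority	= SH_MOBILE_PRE(SH_MOBILE_SLEEP_BOARD),
};

static int __init example_register_sleep_hooks(void)
{
	atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
				       &example_board_pre_sleep_nb);
	return 0;
}
device_initcall(example_register_sleep_hooks);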
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index b5c5acdc8c0..c15415b4b16 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -171,10 +171,6 @@ BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(nmi);
-#ifdef CONFIG_BUG
-extern void handle_BUG(struct pt_regs *);
-#endif
-
#define arch_align_stack(x) (x)
struct mem_access {
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 607d413f616..06814f5b59c 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -232,4 +232,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs);
+static inline void set_bl_bit(void)
+{
+ unsigned long __dummy0, __dummy1;
+
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "or %2, %0\n\t"
+ "and %3, %0\n\t"
+ "ldc %0, sr\n\t"
+ : "=&r" (__dummy0), "=r" (__dummy1)
+ : "r" (0x10000000), "r" (0xffffff0f)
+ : "memory"
+ );
+}
+
+static inline void clear_bl_bit(void)
+{
+ unsigned long __dummy0, __dummy1;
+
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and %2, %0\n\t"
+ "ldc %0, sr\n\t"
+ : "=&r" (__dummy0), "=r" (__dummy1)
+ : "1" (~0x10000000)
+ : "memory"
+ );
+}
+
#endif /* __ASM_SH_SYSTEM_32_H */
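set_bl_bit()/clear_bl_bit() now live next to the rest of the SR handling in system_32.h, with 64-bit twins below. SR.BL masks exceptions and interrupts at the CPU, so the typical pattern is a short bracketed critical region; a sketch:

#include <asm/system.h>

static void example_blocked_region(void)
{
	set_bl_bit();		/* SR.BL = 1: block exceptions/interrupts */
	/* ... code that must not be disturbed, e.g. sleep mode entry ... */
	clear_bl_bit();		/* SR.BL = 0: normal operation resumes */
}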
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 8e4a03e7966..ab1dd917ea8 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -12,6 +12,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#include <cpu/registers.h>
#include <asm/processor.h>
/*
@@ -47,4 +48,29 @@ static inline reg_size_t register_align(void *val)
return (unsigned long long)(signed long long)(signed long)val;
}
+#define SR_BL_LL 0x0000000010000000LL
+
+static inline void set_bl_bit(void)
+{
+ unsigned long long __dummy0, __dummy1 = SR_BL_LL;
+
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+
+}
+
+static inline void clear_bl_bit(void)
+{
+ unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
+
+ __asm__ __volatile__("getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy0)
+ : "r" (__dummy1));
+}
+
#endif /* __ASM_SH_SYSTEM_64_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index bdeb9d46d17..1f3d927e226 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -19,6 +19,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
__u32 cpu;
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space */
@@ -50,6 +51,7 @@ struct thread_info {
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
+ .status = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
@@ -111,13 +113,11 @@ extern void free_thread_info(struct thread_info *ti);
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */
#define TIF_SINGLESTEP 4 /* singlestepping active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SECCOMP 6 /* secure computing */
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
-#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define TIF_FREEZE 19 /* Freezing for suspend */
@@ -125,13 +125,11 @@ extern void free_thread_info(struct thread_info *ti);
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USEDFPU (1 << TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1 << TIF_FREEZE)
@@ -149,13 +147,33 @@ extern void free_thread_info(struct thread_info *ti);
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
_TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
- _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \
- _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \
+ _TIF_SYSCALL_TRACEPOINT)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
_TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
+#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->status |= TS_RESTORE_SIGMASK;
+ set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+}
+#endif /* !__ASSEMBLY__ */
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_THREAD_INFO_H */
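With TIF_RESTORE_SIGMASK gone, the request lives in the thread-synchronous status word, so no atomic bit operations are needed on the producer side. The consumer in the signal return path is the mirror image; roughly (a sketch of the pattern, not the exact arch/sh signal code):

#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/thread_info.h>

static void example_restore_saved_sigmask(void)
{
	struct thread_info *ti = current_thread_info();

	if (ti->status & TS_RESTORE_SIGMASK) {
		ti->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}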
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 65e7bd2f224..37cdadd975a 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -40,6 +40,14 @@
#endif
+#define mc_capable() (1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+
#include <asm-generic/topology.h>
#endif /* _ASM_SH_TOPOLOGY_H */
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h
index 4ca4b771737..9bf96168443 100644
--- a/arch/sh/include/asm/ubc.h
+++ b/arch/sh/include/asm/ubc.h
@@ -60,16 +60,5 @@
#define BRCR_UBDE (1 << 0)
#endif
-#ifndef __ASSEMBLY__
-/* arch/sh/kernel/cpu/ubc.S */
-extern void ubc_sleep(void);
-
-#ifdef CONFIG_UBC_WAKEUP
-extern void ubc_wakeup(void);
-#else
-#define ubc_wakeup() do { } while (0)
-#endif
-#endif
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_UBC_H */
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h
index 2fe7cee9e43..19dfff5c851 100644
--- a/arch/sh/include/asm/watchdog.h
+++ b/arch/sh/include/asm/watchdog.h
@@ -2,6 +2,8 @@
* include/asm-sh/watchdog.h
*
* Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2009 Siemens AG
+ * Copyright (C) 2009 Valentin Sitdikov
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -61,6 +63,61 @@
#define WTCSR_CKS_2048 0x06
#define WTCSR_CKS_4096 0x07
+#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780)
+/**
+ * sh_wdt_read_cnt - Read from Counter
+ * Reads back the WTCNT value.
+ */
+static inline __u32 sh_wdt_read_cnt(void)
+{
+ return ctrl_inl(WTCNT_R);
+}
+
+/**
+ * sh_wdt_write_cnt - Write to Counter
+ * @val: Value to write
+ *
+ * Writes the given value @val to the lower byte of the timer counter.
+ * The upper byte is set manually on each write.
+ */
+static inline void sh_wdt_write_cnt(__u32 val)
+{
+ ctrl_outl((WTCNT_HIGH << 24) | (__u32)val, WTCNT);
+}
+
+/**
+ * sh_wdt_write_bst - Write to Counter
+ * @val: Value to write
+ *
+ * Writes the given value @val to the lower byte of the timer counter.
+ * The upper byte is set manually on each write.
+ */
+static inline void sh_wdt_write_bst(__u32 val)
+{
+ ctrl_outl((WTBST_HIGH << 24) | (__u32)val, WTBST);
+}
+/**
+ * sh_wdt_read_csr - Read from Control/Status Register
+ *
+ * Reads back the WTCSR value.
+ */
+static inline __u32 sh_wdt_read_csr(void)
+{
+ return ctrl_inl(WTCSR_R);
+}
+
+/**
+ * sh_wdt_write_csr - Write to Control/Status Register
+ * @val: Value to write
+ *
+ * Writes the given value @val to the lower byte of the control/status
+ * register. The upper byte is set manually on each write.
+ */
+static inline void sh_wdt_write_csr(__u32 val)
+{
+ ctrl_outl((WTCSR_HIGH << 24) | (__u32)val, WTCSR);
+}
+#else
/**
* sh_wdt_read_cnt - Read from Counter
* Reads back the WTCNT value.
@@ -103,6 +160,6 @@ static inline void sh_wdt_write_csr(__u8 val)
{
ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
}
-
+#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_WATCHDOG_H */
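On SH7785/SH7780 the counter, status and boot-status registers are 32 bits wide and sit at new addresses, but the accessor names and calling conventions stay the same, so existing callers keep working. A minimal sketch built only on the helpers above (the values are illustrative):

#include <asm/watchdog.h>

static void example_wdt_slow_clock(void)
{
	/* Select the slowest count clock defined above (longest period). */
	sh_wdt_write_csr(WTCSR_CKS_4096);
}

static void example_wdt_ping(void)
{
	/* Restart the up-counter; the helper adds the required write prefix. */
	sh_wdt_write_cnt(0);
}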
diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h
index 259f6a0ce23..7672301d0c7 100644
--- a/arch/sh/include/cpu-sh4/cpu/watchdog.h
+++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h
@@ -2,6 +2,8 @@
* include/asm-sh/cpu-sh4/watchdog.h
*
* Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2009 Siemens AG
+ * Copyright (C) 2009 Sitdikov Valentin
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -10,9 +12,20 @@
#ifndef __ASM_CPU_SH4_WATCHDOG_H
#define __ASM_CPU_SH4_WATCHDOG_H
+#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780)
+/* Prefix definition */
+#define WTBST_HIGH 0x55
+/* Register definitions */
+#define WTCNT_R 0xffcc0010 /*WDTCNT*/
+#define WTCSR 0xffcc0004 /*WDTCSR*/
+#define WTCNT 0xffcc0000 /*WDTST*/
+#define WTST WTCNT
+#define WTBST 0xffcc0008 /*WDTBST*/
+#else
/* Register definitions */
#define WTCNT 0xffc00008
#define WTCSR 0xffc0000c
+#endif
/* Bit definitions */
#define WTCSR_TME 0x80
diff --git a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt
index 8b8e4fa1fee..cc737b80733 100644
--- a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt
+++ b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt
@@ -22,13 +22,12 @@ ED 0xff000010, 0x00000004
LIST "setup clocks"
ED 0xa4150024, 0x00004000
ED 0xa4150000, 0x8E003508
-ED 0xa4150004, 0x00000000
WAIT 1
LIST "BSC"
ED 0xff800020, 0xa5a50000
-ED 0xfec10000, 0x00000013
+ED 0xfec10000, 0x00001013
ED 0xfec10004, 0x11110400
ED 0xfec10024, 0x00000440
diff --git a/arch/sh/include/mach-se/mach/se7722.h b/arch/sh/include/mach-se/mach/se7722.h
index e971d9a82f4..16505bfb8a9 100644
--- a/arch/sh/include/mach-se/mach/se7722.h
+++ b/arch/sh/include/mach-se/mach/se7722.h
@@ -92,18 +92,11 @@
#define SE7722_FPGA_IRQ_MRSHPC1 3 /* IRQ1 */
#define SE7722_FPGA_IRQ_MRSHPC2 4 /* IRQ1 */
#define SE7722_FPGA_IRQ_MRSHPC3 5 /* IRQ1 */
-
#define SE7722_FPGA_IRQ_NR 6
-#define SE7722_FPGA_IRQ_BASE 110
-
-#define MRSHPC_IRQ3 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC3)
-#define MRSHPC_IRQ2 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC2)
-#define MRSHPC_IRQ1 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC1)
-#define MRSHPC_IRQ0 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC0)
-#define SMC_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_SMC)
-#define USB_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_USB)
/* arch/sh/boards/se/7722/irq.c */
+extern unsigned int se7722_fpga_irq[];
+
void init_se7722_IRQ(void);
#define __IO_PREFIX se7722
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index a2d0a40f384..0471a3eb25e 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -9,8 +9,12 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
endif
-obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \
- machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o \
+CFLAGS_REMOVE_return_address.o = -pg
+
+obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+ idle.o io.o io_generic.o irq.o \
+ irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o \
+ ptrace_$(BITS).o return_address.o \
setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
syscalls_$(BITS).o time.o topology.o traps.o \
traps_$(BITS).o unwinder.o
@@ -28,13 +32,13 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index d218e808294..08a2be775b6 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -34,5 +34,28 @@ int main(void)
DEFINE(PBE_NEXT, offsetof(struct pbe, next));
DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
#endif
+
+ DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
+ DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
+ DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
+ DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
+ DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
+ DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
+ DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
+ DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
+ DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
+ DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
+ DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
+ DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
+ DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
+ DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
+ DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
+ DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
+ DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
+ DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
+ DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
+ DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
+ DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
+ DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
return 0;
}
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 3d6b9312dc4..d97c803719e 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
# Common interfaces.
-obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index e932ebef473..89b4b76c0d7 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -75,16 +75,11 @@ static void __init expmask_init(void)
/*
* Future proofing.
*
- * Disable support for slottable sleep instruction
- * and non-nop instructions in the rte delay slot.
+ * Disable support for slottable sleep instruction, non-nop
+ * instructions in the rte delay slot, and associative writes to
+ * the memory-mapped cache array.
*/
- expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
-
- /*
- * Enable associative writes to the memory-mapped cache array
- * until the cache flush ops have been rewritten.
- */
- expmask |= EXPMASK_MMCAW;
+ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
__raw_writel(expmask, EXPMASK);
ctrl_barrier();
@@ -311,12 +306,12 @@ asmlinkage void __init sh_cpu_init(void)
if (fpu_disabled) {
printk("FPU Disabled\n");
current_cpu_data.flags &= ~CPU_HAS_FPU;
- disable_fpu();
}
/* FPU initialization */
+ disable_fpu();
if ((current_cpu_data.flags & CPU_HAS_FPU)) {
- clear_thread_flag(TIF_USEDFPU);
+ current_thread_info()->status &= ~TS_USEDFPU;
clear_used_math();
}
@@ -338,17 +333,6 @@ asmlinkage void __init sh_cpu_init(void)
}
#endif
- /*
- * Some brain-damaged loaders decided it would be a good idea to put
- * the UBC to sleep. This causes some issues when it comes to things
- * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
- * we wake it up and hope that all is well.
- */
-#ifdef CONFIG_SUPERH32
- if (raw_smp_processor_id() == 0)
- ubc_wakeup();
-#endif
-
speculative_execution_init();
expmask_init();
}
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 6df2fb98eb3..d395ce5740e 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -25,14 +25,12 @@
/*
* Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
*/
void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
asm volatile("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
: "memory");
disable_fpu();
- release_fpu(regs);
}
static void
@@ -598,31 +595,31 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
- save_fpu(tsk, regs);
+ __unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) {
tsk->thread.fpu.hard.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs);
restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
return;
}
force_sig(SIGFPE, tsk);
}
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- TRAP_HANDLER_DECL;
grab_fpu(regs);
- if (!user_mode(regs)) {
+ if (unlikely(!user_mode(regs))) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
return;
}
- if (used_math()) {
+ if (likely(used_math())) {
/* Using the FPU again. */
restore_fpu(tsk);
} else {
@@ -630,5 +627,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
fpu_init();
set_used_math();
}
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
}
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index bb407ef0b91..3f7e2a22c7c 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -297,41 +297,8 @@ ENTRY(vbr_base)
!
.balign 256,0,256
general_exception:
-#ifndef CONFIG_CPU_SUBTYPE_SHX3
bra handle_exception
sts pr, k3 ! save original pr value in k3
-#else
- mov.l 1f, k4
- mov.l @k4, k4
-
- ! Is EXPEVT larger than 0x800?
- mov #0x8, k0
- shll8 k0
- cmp/hs k0, k4
- bf 0f
-
- ! then add 0x580 (k2 is 0xd80 or 0xda0)
- mov #0x58, k0
- shll2 k0
- shll2 k0
- add k0, k4
-0:
- ! Setup stack and save DSP context (k0 contains original r15 on return)
- bsr prepare_stack
- nop
-
- ! Save registers / Switch to bank 0
- mov k4, k2 ! keep vector in k2
- mov.l 1f, k4 ! SR bits to clear in k4
- bsr save_regs ! needs original pr value in k3
- nop
-
- bra handle_exception_special
- nop
-
- .align 2
-1: .long EXPEVT
-#endif
! prepare_stack()
! - roll back gRB
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 203b18347b8..3a1dbc70983 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -9,6 +9,11 @@ obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o)
obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
+# Perf events
+perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o
+
# CPU subtype setup
obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o
@@ -27,4 +32,5 @@ endif
# Additional clocks by subtype
clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o
-obj-y += $(clock-y)
+obj-y += $(clock-y)
+obj-$(CONFIG_PERF_EVENTS) += $(perf-y)
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e3ea5411da6..e97857aec8a 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags;
/*
* Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
*/
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
asm volatile ("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
:"memory");
disable_fpu();
- release_fpu(regs);
}
static void restore_fpu(struct task_struct *tsk)
@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* fcnvsd */
struct task_struct *tsk = current;
- save_fpu(tsk, regs);
if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
denormal_to_double(&tsk->thread.fpu.hard,
@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
- save_fpu(tsk, regs);
+ __unlazy_fpu(tsk, regs);
fpu_exception_flags = 0;
if (ieee_fpe_handler(regs)) {
tsk->thread.fpu.hard.fpscr &=
@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error)
tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
grab_fpu(regs);
restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
(fpu_exception_flags >> 2)) == 0) {
return;
@@ -483,18 +479,18 @@ BUILD_TRAP_HANDLER(fpu_error)
force_sig(SIGFPE, tsk);
}
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- TRAP_HANDLER_DECL;
grab_fpu(regs);
- if (!user_mode(regs)) {
+ if (unlikely(!user_mode(regs))) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
return;
}
- if (used_math()) {
+ if (likely(used_math())) {
/* Using the FPU again. */
restore_fpu(tsk);
} else {
@@ -502,5 +498,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
fpu_init();
set_used_math();
}
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
}
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
new file mode 100644
index 00000000000..7f9ecc9c2d0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -0,0 +1,253 @@
+/*
+ * Performance events support for SH7750-style performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PM_CR_BASE 0xff000084 /* 16-bit */
+#define PM_CTR_BASE 0xff100004 /* 32-bit */
+
+#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
+#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
+#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
+
+#define PMCR_PMM_MASK 0x0000003f
+
+#define PMCR_CLKF 0x00000100
+#define PMCR_PMCLR 0x00002000
+#define PMCR_PMST 0x00004000
+#define PMCR_PMEN 0x00008000
+
+static struct sh_pmu sh7750_pmu;
+
+/*
+ * There are a number of events supported by each counter (33 in total).
+ * Since we have 2 counters, each counter will take the event code as it
+ * corresponds to the PMCR PMM setting. Each counter can be configured
+ * independently.
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x01 Operand read access
+ * 0x02 Operand write access
+ * 0x03 UTLB miss
+ * 0x04 Operand cache read miss
+ * 0x05 Operand cache write miss
+ * 0x06 Instruction fetch (w/ cache)
+ * 0x07 Instruction TLB miss
+ * 0x08 Instruction cache miss
+ * 0x09 All operand accesses
+ * 0x0a All instruction accesses
+ * 0x0b OC RAM operand access
+ * 0x0d On-chip I/O space access
+ * 0x0e Operand access (r/w)
+ * 0x0f Operand cache miss (r/w)
+ * 0x10 Branch instruction
+ * 0x11 Branch taken
+ * 0x12 BSR/BSRF/JSR
+ * 0x13 Instruction execution
+ * 0x14 Instruction execution in parallel
+ * 0x15 FPU Instruction execution
+ * 0x16 Interrupt
+ * 0x17 NMI
+ * 0x18 trapa instruction execution
+ * 0x19 UBCA match
+ * 0x1a UBCB match
+ * 0x21 Instruction cache fill
+ * 0x22 Operand cache fill
+ * 0x23 Elapsed time
+ * 0x24 Pipeline freeze by I-cache miss
+ * 0x25 Pipeline freeze by D-cache miss
+ * 0x27 Pipeline freeze by branch instruction
+ * 0x28 Pipeline freeze by CPU register
+ * 0x29 Pipeline freeze by FPU
+ */
+
+static const int sh7750_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh7750_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0001,
+ [ C(RESULT_MISS) ] = 0x0004,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0002,
+ [ C(RESULT_MISS) ] = 0x0005,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0006,
+ [ C(RESULT_MISS) ] = 0x0008,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0003,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0007,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh7750_event_map(int event)
+{
+ return sh7750_general_events[event];
+}
+
+static u64 sh7750_pmu_read(int idx)
+{
+ return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
+ __raw_readl(PMCTRL(idx));
+}
+
+static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readw(PMCR(idx));
+ tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
+ __raw_writew(tmp, PMCR(idx));
+}
+
+static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
+ __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
+}
+
+static void sh7750_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
+}
+
+static void sh7750_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
+}
+
+static struct sh_pmu sh7750_pmu = {
+ .name = "SH7750",
+ .num_events = 2,
+ .event_map = sh7750_event_map,
+ .max_events = ARRAY_SIZE(sh7750_general_events),
+ .raw_event_mask = PMCR_PMM_MASK,
+ .cache_events = &sh7750_cache_events,
+ .read = sh7750_pmu_read,
+ .disable = sh7750_pmu_disable,
+ .enable = sh7750_pmu_enable,
+ .disable_all = sh7750_pmu_disable_all,
+ .enable_all = sh7750_pmu_enable_all,
+};
+
+static int __init sh7750_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh7750_pmu);
+}
+arch_initcall(sh7750_pmu_init);
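Once register_sh_pmu() has hooked the counters into the perf framework, the raw event codes listed above can be requested straight from user space through the usual perf syscall. A hedged sketch (assumes a kernel with this support; error handling trimmed; 0x23 is the SH7750 "elapsed time" event from the table):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_RAW;
	attr.config = 0x23;	/* SH7750 raw event: elapsed time */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("raw event 0x23: %llu\n", count);
	close(fd);
	return 0;
}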
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 490d5dc9e37..33bab477d2e 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -44,3 +44,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
obj-y += $(clock-y)
obj-$(CONFIG_SMP) += $(smp-y)
obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index dfe9192be63..9db743802f0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -152,7 +152,7 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0),
SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0),
SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0),
- SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0),
+ SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
};
#define R_CLK (&r_clk)
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
new file mode 100644
index 00000000000..eddc21973fa
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -0,0 +1,269 @@
+/*
+ * Performance events support for SH-4A performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
+#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
+
+#define CCBR_CIT_MASK (0x7ff << 6)
+#define CCBR_DUC (1 << 3)
+#define CCBR_CMDS (1 << 1)
+#define CCBR_PPCE (1 << 0)
+
+#define PPC_PMCAT 0xfc100080
+
+#define PMCAT_OVF3 (1 << 27)
+#define PMCAT_CNN3 (1 << 26)
+#define PMCAT_CLR3 (1 << 25)
+#define PMCAT_OVF2 (1 << 19)
+#define PMCAT_CLR2 (1 << 17)
+#define PMCAT_OVF1 (1 << 11)
+#define PMCAT_CNN1 (1 << 10)
+#define PMCAT_CLR1 (1 << 9)
+#define PMCAT_OVF0 (1 << 3)
+#define PMCAT_CLR0 (1 << 1)
+
+static struct sh_pmu sh4a_pmu;
+
+/*
+ * Supported raw event codes:
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x0000 number of elapsed cycles
+ * 0x0200 number of elapsed cycles in privileged mode
+ * 0x0280 number of elapsed cycles while SR.BL is asserted
+ * 0x0202 instruction execution
+ * 0x0203 instruction execution in parallel
+ * 0x0204 number of unconditional branches
+ * 0x0208 number of exceptions
+ * 0x0209 number of interrupts
+ * 0x0220 UTLB miss caused by instruction fetch
+ * 0x0222 UTLB miss caused by operand access
+ * 0x02a0 number of ITLB misses
+ * 0x0028 number of accesses to instruction memories
+ * 0x0029 number of accesses to instruction cache
+ * 0x002a instruction cache miss
+ * 0x022e number of access to instruction X/Y memory
+ * 0x0030 number of reads to operand memories
+ * 0x0038 number of writes to operand memories
+ * 0x0031 number of operand cache read accesses
+ * 0x0039 number of operand cache write accesses
+ * 0x0032 operand cache read miss
+ * 0x003a operand cache write miss
+ * 0x0236 number of reads to operand X/Y memory
+ * 0x023e number of writes to operand X/Y memory
+ * 0x0237 number of reads to operand U memory
+ * 0x023f number of writes to operand U memory
+ * 0x0337 number of U memory read buffer misses
+ * 0x02b4 number of wait cycles due to operand read access
+ * 0x02bc number of wait cycles due to operand write access
+ * 0x0033 number of wait cycles due to operand cache read miss
+ * 0x003b number of wait cycles due to operand cache write miss
+ */
+
+/*
+ * Special reserved bits used by hardware emulators, read values will
+ * vary, but writes must always be 0.
+ */
+#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
+
+static const int sh4a_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh4a_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0031,
+ [ C(RESULT_MISS) ] = 0x0032,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0039,
+ [ C(RESULT_MISS) ] = 0x003a,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0029,
+ [ C(RESULT_MISS) ] = 0x002a,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0030,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0038,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0222,
+ [ C(RESULT_MISS) ] = 0x0220,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x02a0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh4a_event_map(int event)
+{
+ return sh4a_general_events[event];
+}
+
+static u64 sh4a_pmu_read(int idx)
+{
+ return __raw_readl(PPC_PMCTR(idx));
+}
+
+static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
+ __raw_writel(tmp, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_PMCAT);
+ tmp &= ~PMCAT_EMU_CLR_MASK;
+ tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
+ __raw_writel(tmp, PPC_PMCAT);
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
+ __raw_writel(tmp, PPC_CCBR(idx));
+
+ __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
+}
+
+static void sh4a_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
+}
+
+static struct sh_pmu sh4a_pmu = {
+ .name = "SH-4A",
+ .num_events = 2,
+ .event_map = sh4a_event_map,
+ .max_events = ARRAY_SIZE(sh4a_general_events),
+ .raw_event_mask = 0x3ff,
+ .cache_events = &sh4a_cache_events,
+ .read = sh4a_pmu_read,
+ .disable = sh4a_pmu_disable,
+ .enable = sh4a_pmu_enable,
+ .disable_all = sh4a_pmu_disable_all,
+ .enable_all = sh4a_pmu_enable_all,
+};
+
+static int __init sh4a_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh4a_pmu);
+}
+arch_initcall(sh4a_pmu_init);
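
For reference, the raw event codes tabulated above are exactly what user space supplies as the config value of a PERF_TYPE_RAW counter (filtered by the 0x3ff raw_event_mask). Below is a minimal user-space sketch, not part of this patch, that counts instruction execution (code 0x0202) through the standard perf_event_open(2) interface; the syscall is invoked directly since libc provides no wrapper:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_RAW;
		attr.config = 0x0202;	/* instruction execution, from the table above */
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		/* count for the calling task on any CPU */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... run the workload of interest here ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %lld\n", count);

		close(fd);
		return 0;
	}
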
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index f3851fd757e..845e89c936e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -20,6 +20,8 @@
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <linux/notifier.h>
+#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <cpu/sh7724.h>
@@ -202,7 +204,7 @@ static struct resource veu0_resources[] = {
[0] = {
.name = "VEU3F0",
.start = 0xfe920000,
- .end = 0xfe9200cb - 1,
+ .end = 0xfe9200cb,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -234,7 +236,7 @@ static struct resource veu1_resources[] = {
[0] = {
.name = "VEU3F1",
.start = 0xfe924000,
- .end = 0xfe9240cb - 1,
+ .end = 0xfe9240cb,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -523,6 +525,70 @@ static struct platform_device jpu_device = {
},
};
+/* SPU2DSP0 */
+static struct uio_info spu0_platform_data = {
+ .name = "SPU2DSP0",
+ .version = "0",
+ .irq = 86,
+};
+
+static struct resource spu0_resources[] = {
+ [0] = {
+ .name = "SPU2DSP0",
+ .start = 0xFE200000,
+ .end = 0xFE2FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+		/* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device spu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 4,
+ .dev = {
+ .platform_data = &spu0_platform_data,
+ },
+ .resource = spu0_resources,
+ .num_resources = ARRAY_SIZE(spu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_SPU,
+ },
+};
+
+/* SPU2DSP1 */
+static struct uio_info spu1_platform_data = {
+ .name = "SPU2DSP1",
+ .version = "0",
+ .irq = 87,
+};
+
+static struct resource spu1_resources[] = {
+ [0] = {
+ .name = "SPU2DSP1",
+ .start = 0xFE300000,
+ .end = 0xFE3FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+		/* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device spu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 5,
+ .dev = {
+ .platform_data = &spu1_platform_data,
+ },
+ .resource = spu1_resources,
+ .num_resources = ARRAY_SIZE(spu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_SPU,
+ },
+};
+
static struct platform_device *sh7724_devices[] __initdata = {
&cmt_device,
&tmu0_device,
@@ -539,6 +605,8 @@ static struct platform_device *sh7724_devices[] __initdata = {
&veu0_device,
&veu1_device,
&jpu_device,
+ &spu0_device,
+ &spu1_device,
};
static int __init sh7724_devices_setup(void)
@@ -547,6 +615,8 @@ static int __init sh7724_devices_setup(void)
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+ platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
+ platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
return platform_add_devices(sh7724_devices,
ARRAY_SIZE(sh7724_devices));
@@ -827,3 +897,193 @@ void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
+
+static struct {
+ /* BSC */
+ unsigned long mmselr;
+ unsigned long cs0bcr;
+ unsigned long cs4bcr;
+ unsigned long cs5abcr;
+ unsigned long cs5bbcr;
+ unsigned long cs6abcr;
+ unsigned long cs6bbcr;
+ unsigned long cs4wcr;
+ unsigned long cs5awcr;
+ unsigned long cs5bwcr;
+ unsigned long cs6awcr;
+ unsigned long cs6bwcr;
+ /* INTC */
+ unsigned short ipra;
+ unsigned short iprb;
+ unsigned short iprc;
+ unsigned short iprd;
+ unsigned short ipre;
+ unsigned short iprf;
+ unsigned short iprg;
+ unsigned short iprh;
+ unsigned short ipri;
+ unsigned short iprj;
+ unsigned short iprk;
+ unsigned short iprl;
+ unsigned char imr0;
+ unsigned char imr1;
+ unsigned char imr2;
+ unsigned char imr3;
+ unsigned char imr4;
+ unsigned char imr5;
+ unsigned char imr6;
+ unsigned char imr7;
+ unsigned char imr8;
+ unsigned char imr9;
+ unsigned char imr10;
+ unsigned char imr11;
+ unsigned char imr12;
+ /* RWDT */
+ unsigned short rwtcnt;
+ unsigned short rwtcsr;
+ /* CPG */
+ unsigned long irdaclk;
+ unsigned long spuclk;
+} sh7724_rstandby_state;
+
+static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
+ sh7724_rstandby_state.mmselr |= 0xa5a50000;
+ sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
+ sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
+ sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
+ sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
+ sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
+ sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
+ sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
+ sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
+ sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
+ sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
+ sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
+ sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
+ sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
+ sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
+ sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
+ sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
+ sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
+ sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
+ sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
+ sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
+ sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
+ sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
+ sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
+ sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
+ sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
+ sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
+ sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
+ sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
+ sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
+ sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
+ sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
+ sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
+ sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
+ sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
+ sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
+ sh7724_rstandby_state.rwtcnt |= 0x5a00;
+ sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
+ sh7724_rstandby_state.rwtcsr |= 0xa500;
+ __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
+
+ /* CPG */
+ sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
+ sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
+ __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
+ __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
+ __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
+ __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
+ __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
+ __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
+ __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
+ __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
+ __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
+ __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
+ __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
+ __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
+ __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
+ __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
+ __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
+ __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
+ __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
+ __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
+ __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
+ __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
+ __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
+ __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
+ __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
+ __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
+ __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
+ __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
+ __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
+ __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
+ __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
+ __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
+ __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
+ __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
+ __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
+ __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
+ __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
+ __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
+
+ /* CPG */
+ __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
+ __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sh7724_pre_sleep_notifier = {
+ .notifier_call = sh7724_pre_sleep_notifier_call,
+ .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
+};
+
+static struct notifier_block sh7724_post_sleep_notifier = {
+ .notifier_call = sh7724_post_sleep_notifier_call,
+ .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
+};
+
+static int __init sh7724_sleep_setup(void)
+{
+ atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
+ &sh7724_pre_sleep_notifier);
+
+ atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
+ &sh7724_post_sleep_notifier);
+ return 0;
+}
+arch_initcall(sh7724_sleep_setup);
+
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index e848443deeb..c7ba9166e18 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -15,6 +15,15 @@
#include <linux/sh_timer.h>
#include <asm/mmzone.h>
+/*
+ * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
+ * INTEVT values overlap with the FPU EXPEVT ones, requiring special
+ * demuxing in the exception dispatch path.
+ *
+ * As this overlap is something that never should have made it into
+ * silicon in the first place, we just refuse to deal with the port at
+ * all rather than adding infrastructure to hack around it.
+ */
static struct plat_sci_port sci_platform_data[] = {
{
.mapbase = 0xffc30000,
@@ -27,11 +36,6 @@ static struct plat_sci_port sci_platform_data[] = {
.type = PORT_SCIF,
.irqs = { 44, 45, 47, 46 },
}, {
- .mapbase = 0xffc50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 48, 49, 51, 50 },
- }, {
.mapbase = 0xffc60000,
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
@@ -268,7 +272,11 @@ enum {
UNUSED = 0,
/* interrupt sources */
- IRL, IRQ0, IRQ1, IRQ2, IRQ3,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+ IRQ0, IRQ1, IRQ2, IRQ3,
HUDII,
TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
PCII0, PCII1, PCII2, PCII3, PCII4,
@@ -291,7 +299,7 @@ enum {
INTICI4, INTICI5, INTICI6, INTICI7,
/* interrupt groups */
- PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
+ IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
DMAC0, DMAC1,
};
@@ -309,8 +317,6 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
- INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820),
- INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860),
INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
@@ -344,10 +350,13 @@ static struct intc_vect vectors[] __initdata = {
};
static struct intc_group groups[] __initdata = {
+ INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL),
INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
@@ -419,14 +428,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl[] __initdata = {
- INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220),
- INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260),
- INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0),
- INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0),
- INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320),
- INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360),
- INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0),
- INTC_VECT(IRL, 0x3c0),
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
};
static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 185ec3976a2..5863e0c4d02 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -14,6 +14,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
+#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
+#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
+
+#define STBCR_MSTP 0x00000001
+#define STBCR_RESET 0x00000002
+#define STBCR_LTSLP 0x80000000
+
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
{
unsigned int message = (unsigned int)(long)arg;
@@ -21,9 +28,9 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
unsigned int offs = 4 * cpu;
unsigned int x;
- x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
+ x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
x &= (1 << (message << 2));
- ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
+ __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
smp_message_recv(message);
@@ -37,6 +44,9 @@ void __init plat_smp_setup(void)
init_cpu_possible(cpumask_of(cpu));
+ /* Enable light sleep for the boot CPU */
+ __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
+
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
@@ -66,32 +76,23 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
"IPI", (void *)(long)i);
}
-#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
-#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
-
-#define STBCR_MSTP 0x00000001
-#define STBCR_RESET 0x00000002
-#define STBCR_LTSLP 0x80000000
-
-#define STBCR_AP_VAL (STBCR_RESET | STBCR_LTSLP)
-
void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
{
- ctrl_outl(entry_point, RESET_REG(cpu));
+ __raw_writel(entry_point, RESET_REG(cpu));
- if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
- ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
+ if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
- while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu_relax();
/* Start up secondary processor by sending a reset */
- ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
+ __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
int plat_smp_processor_id(void)
{
- return ctrl_inl(0xff000048); /* CPIDR */
+ return __raw_readl(0xff000048); /* CPIDR */
}
void plat_send_ipi(unsigned int cpu, unsigned int message)
@@ -100,5 +101,5 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
BUG_ON(cpu >= 4);
- ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
+ __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b0aacf67525..8f13f73cb2c 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1
- movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+ movi _TIF_SIGPENDING, r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 1c504bd972c..83972aa319c 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -87,25 +87,31 @@ void sh_mobile_setup_cpuidle(void)
dev->safe_state = state;
- state = &dev->states[i++];
- snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
- strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
- state->exit_latency = 100;
- state->target_residency = 1 * 2;
- state->power_usage = 1;
- state->flags = 0;
- state->flags |= CPUIDLE_FLAG_TIME_VALID;
- state->enter = cpuidle_sleep_enter;
+ if (sh_mobile_sleep_supported & SUSP_SH_SF) {
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ strncpy(state->desc, "SuperH Sleep Mode [SF]",
+ CPUIDLE_DESC_LEN);
+ state->exit_latency = 100;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+ }
- state = &dev->states[i++];
- snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
- strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN);
- state->exit_latency = 2300;
- state->target_residency = 1 * 2;
- state->power_usage = 1;
- state->flags = 0;
- state->flags |= CPUIDLE_FLAG_TIME_VALID;
- state->enter = cpuidle_sleep_enter;
+ if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+ strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
+ CPUIDLE_DESC_LEN);
+ state->exit_latency = 2300;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+ }
dev->state_count = i;
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index ee3c2aaf66f..ca029a44743 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -15,6 +15,13 @@
#include <linux/suspend.h>
#include <asm/suspend.h>
#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Notifier lists for pre/post sleep notification
+ */
+ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
+ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
/*
* Sleep modes available on SuperH Mobile:
@@ -26,30 +33,105 @@
#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF)
+ /*
+ * U-standby mode is unsupported since it needs bootloader hacks
+ */
-/*
- * The following modes are not there yet:
- *
- * R-standby mode is unsupported, but will be added in the future
- * U-standby mode is low priority since it needs bootloader hacks
- */
-
-#define ILRAM_BASE 0xe5200000
-
-extern const unsigned char sh_mobile_standby[];
-extern const unsigned int sh_mobile_standby_size;
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+#define RAM_BASE 0xfd800000 /* RSMEM */
+#else
+#define RAM_BASE 0xe5200000 /* ILRAM */
+#endif
void sh_mobile_call_standby(unsigned long mode)
{
- void *onchip_mem = (void *)ILRAM_BASE;
- void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem;
+ void *onchip_mem = (void *)RAM_BASE;
+ struct sh_sleep_data *sdp = onchip_mem;
+ void (*standby_onchip_mem)(unsigned long, unsigned long);
+
+ /* code located directly after data structure */
+ standby_onchip_mem = (void *)(sdp + 1);
+
+ atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
+ mode, NULL);
+
+ /* flush the caches if MMU flag is set */
+ if (mode & SUSP_SH_MMU)
+ flush_cache_all();
/* Let assembly snippet in on-chip memory handle the rest */
- standby_onchip_mem(mode, ILRAM_BASE);
+ standby_onchip_mem(mode, RAM_BASE);
+
+ atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
+ mode, NULL);
+}
+
+extern char sh_mobile_sleep_enter_start;
+extern char sh_mobile_sleep_enter_end;
+
+extern char sh_mobile_sleep_resume_start;
+extern char sh_mobile_sleep_resume_end;
+
+unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
+
+void sh_mobile_register_self_refresh(unsigned long flags,
+ void *pre_start, void *pre_end,
+ void *post_start, void *post_end)
+{
+ void *onchip_mem = (void *)RAM_BASE;
+ void *vp;
+ struct sh_sleep_data *sdp;
+ int n;
+
+ /* part 0: data area */
+ sdp = onchip_mem;
+ sdp->addr.stbcr = 0xa4150020; /* STBCR */
+ sdp->addr.bar = 0xa4150040; /* BAR */
+ sdp->addr.pteh = 0xff000000; /* PTEH */
+ sdp->addr.ptel = 0xff000004; /* PTEL */
+ sdp->addr.ttb = 0xff000008; /* TTB */
+ sdp->addr.tea = 0xff00000c; /* TEA */
+ sdp->addr.mmucr = 0xff000010; /* MMUCR */
+ sdp->addr.ptea = 0xff000034; /* PTEA */
+ sdp->addr.pascr = 0xff000070; /* PASCR */
+ sdp->addr.irmcr = 0xff000078; /* IRMCR */
+ sdp->addr.ccr = 0xff00001c; /* CCR */
+ sdp->addr.ramcr = 0xff000074; /* RAMCR */
+ vp = sdp + 1;
+
+ /* part 1: common code to enter sleep mode */
+ n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
+ memcpy(vp, &sh_mobile_sleep_enter_start, n);
+ vp += roundup(n, 4);
+
+ /* part 2: board specific code to enter self-refresh mode */
+ n = pre_end - pre_start;
+ memcpy(vp, pre_start, n);
+ sdp->sf_pre = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 3: board specific code to resume from self-refresh mode */
+ n = post_end - post_start;
+ memcpy(vp, post_start, n);
+ sdp->sf_post = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 4: common code to resume from sleep mode */
+ WARN_ON(vp > (onchip_mem + 0x600));
+ vp = onchip_mem + 0x600; /* located at interrupt vector */
+ n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
+ memcpy(vp, &sh_mobile_sleep_resume_start, n);
+ sdp->resume = (unsigned long)vp;
+
+ sh_mobile_sleep_supported |= flags;
}
static int sh_pm_enter(suspend_state_t state)
{
+ if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
+ return -ENXIO;
+
local_irq_disable();
set_bl_bit();
sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
@@ -65,13 +147,6 @@ static struct platform_suspend_ops sh_pm_ops = {
static int __init sh_pm_init(void)
{
- void *onchip_mem = (void *)ILRAM_BASE;
-
- /* Copy the assembly snippet to the otherwise ununsed ILRAM */
- memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
- wmb();
- ctrl_barrier();
-
suspend_set_ops(&sh_pm_ops);
sh_mobile_setup_cpuidle();
return 0;
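
sh_mobile_register_self_refresh() above is designed to be fed by board/SDRAM-specific code: the board hands over small self-contained code fragments for entering and leaving self-refresh, and the core lays them out in on-chip RAM as parts 2 and 3. A hedged sketch of such a caller follows; the sh7724_sdram_* symbols and myboard_* names are hypothetical placeholders, not part of this diff:

	/* hypothetical board glue: register SDRAM self-refresh enter/leave
	 * code fragments with the SuperH Mobile sleep core. */
	#include <linux/init.h>
	#include <asm/suspend.h>

	extern char sh7724_sdram_enter_start, sh7724_sdram_enter_end;
	extern char sh7724_sdram_leave_start, sh7724_sdram_leave_end;

	static int __init myboard_sleep_init(void)
	{
		sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
						&sh7724_sdram_enter_start,
						&sh7724_sdram_enter_end,
						&sh7724_sdram_leave_start,
						&sh7724_sdram_leave_end);
		return 0;
	}
	arch_initcall(myboard_sleep_init);
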
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 7c615b17e20..6dcb8166a64 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -45,12 +45,14 @@ static int __platform_pm_runtime_resume(struct platform_device *pdev)
dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk);
- if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) {
+ if (d->driver) {
hwblk_enable(hwblk_info, hwblk);
ret = 0;
if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) {
- ret = d->driver->pm->runtime_resume(d);
+ if (d->driver->pm && d->driver->pm->runtime_resume)
+ ret = d->driver->pm->runtime_resume(d);
+
if (!ret)
clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
else
@@ -73,12 +75,15 @@ static int __platform_pm_runtime_suspend(struct platform_device *pdev)
dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk);
- if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) {
+ if (d->driver) {
BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags));
+ ret = 0;
- hwblk_enable(hwblk_info, hwblk);
- ret = d->driver->pm->runtime_suspend(d);
- hwblk_disable(hwblk_info, hwblk);
+ if (d->driver->pm && d->driver->pm->runtime_suspend) {
+ hwblk_enable(hwblk_info, hwblk);
+ ret = d->driver->pm->runtime_suspend(d);
+ hwblk_disable(hwblk_info, hwblk);
+ }
if (!ret) {
set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index a439e6c7824..e9dd7fa0abd 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -20,79 +20,103 @@
* Kernel mode register usage, see entry.S:
* k0 scratch
* k1 scratch
- * k4 scratch
*/
#define k0 r0
#define k1 r1
-#define k4 r4
-/* manage self-refresh and enter standby mode.
+/* manage self-refresh and enter standby mode. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
+ .balign 4
+ENTRY(sh_mobile_sleep_enter_start)
- .balign 4096,0,4096
-ENTRY(sh_mobile_standby)
+ /* save mode flags */
+ mov.l r4, @(SH_SLEEP_MODE, r5)
/* save original vbr */
- stc vbr, r1
- mova saved_vbr, r0
- mov.l r1, @r0
+ stc vbr, r0
+ mov.l r0, @(SH_SLEEP_VBR, r5)
/* point vbr to our on-chip memory page */
ldc r5, vbr
/* save return address */
- mova saved_spc, r0
- sts pr, r5
- mov.l r5, @r0
+ sts pr, r0
+ mov.l r0, @(SH_SLEEP_SPC, r5)
/* save sr */
- mova saved_sr, r0
- stc sr, r5
- mov.l r5, @r0
+ stc sr, r0
+ mov.l r0, @(SH_SLEEP_SR, r5)
- /* save mode flags */
- mova saved_mode, r0
- mov.l r4, @r0
+ /* save sp */
+ mov.l r15, @(SH_SLEEP_SP, r5)
+
+ /* save stbcr */
+ bsr save_register
+ mov #SH_SLEEP_REG_STBCR, r0
+
+ /* save mmu and cache context if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_mmu_save_disable
+
+ /* save mmu state */
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PASCR, r0
- /* put mode flags in r0 */
- mov r4, r0
+ bsr save_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+ /* invalidate TLBs and disable the MMU */
+ bsr get_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ mov #4, r1
+ mov.l r1, @r0
+ icbi @r0
+
+ /* save cache registers and disable caches */
+ bsr save_register
+ mov #SH_SLEEP_REG_CCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+
+ bsr get_register
+ mov #SH_SLEEP_REG_CCR, r0
+ mov #0, r1
+ mov.l r1, @r0
+ icbi @r0
+
+skip_mmu_save_disable:
+ /* call self-refresh entering code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_SF, r0
bt skip_set_sf
-#ifdef CONFIG_CPU_SUBTYPE_SH7724
- /* DBSC: put memory in self-refresh mode */
- mov.l dben_reg, r4
- mov.l dben_data0, r1
- mov.l r1, @r4
-
- mov.l dbrfpdn0_reg, r4
- mov.l dbrfpdn0_data0, r1
- mov.l r1, @r4
-
- mov.l dbcmdcnt_reg, r4
- mov.l dbcmdcnt_data0, r1
- mov.l r1, @r4
-
- mov.l dbcmdcnt_reg, r4
- mov.l dbcmdcnt_data1, r1
- mov.l r1, @r4
-
- mov.l dbrfpdn0_reg, r4
- mov.l dbrfpdn0_data1, r1
- mov.l r1, @r4
-#else
- /* SBSC: disable power down and put in self-refresh mode */
- mov.l 1f, r4
- mov.l 2f, r1
- mov.l @r4, r2
- or r1, r2
- mov.l 3f, r3
- and r3, r2
- mov.l r2, @r4
-#endif
+
+ mov.l @(SH_SLEEP_SF_PRE, r5), r0
+ jsr @r0
+ nop
skip_set_sf:
+ mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_STANDBY, r0
bt test_rstandby
@@ -104,6 +128,12 @@ test_rstandby:
tst #SUSP_SH_RSTANDBY, r0
bt test_ustandby
+ /* setup BAR register */
+ bsr get_register
+ mov #SH_SLEEP_REG_BAR, r0
+ mov.l @(SH_SLEEP_RESUME, r5), r1
+ mov.l r1, @r0
+
/* set mode to "r-standby mode" */
bra do_sleep
mov #0x20, r1
@@ -123,124 +153,136 @@ force_sleep:
do_sleep:
/* setup and enter selected standby mode */
- mov.l 5f, r4
- mov.l r1, @r4
+ bsr get_register
+ mov #SH_SLEEP_REG_STBCR, r0
+ mov.l r1, @r0
again:
sleep
bra again
nop
-restore_jump_vbr:
+save_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_ADDR, r0
+ mov.l @r1, r1
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l r1, @(r0, r5)
+ add #-SH_SLEEP_BASE_DATA, r0
+ rts
+ nop
+
+get_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ rts
+ nop
+ENTRY(sh_mobile_sleep_enter_end)
+
+ .balign 4
+ENTRY(sh_mobile_sleep_resume_start)
+
+ /* figure out start address */
+ bsr 0f
+ nop
+0:
+ sts pr, k1
+ mov.l 1f, k0
+ and k0, k1
+
+ /* store pointer to data area in VBR */
+ ldc k1, vbr
+
+ /* setup sr with saved sr */
+ mov.l @(SH_SLEEP_SR, k1), k0
+ ldc k0, sr
+
+ /* now: user register set! */
+ stc vbr, r5
+
/* setup spc with return address to c code */
- mov.l saved_spc, k0
- ldc k0, spc
+ mov.l @(SH_SLEEP_SPC, r5), r0
+ ldc r0, spc
/* restore vbr */
- mov.l saved_vbr, k0
- ldc k0, vbr
+ mov.l @(SH_SLEEP_VBR, r5), r0
+ ldc r0, vbr
/* setup ssr with saved sr */
- mov.l saved_sr, k0
- ldc k0, ssr
+ mov.l @(SH_SLEEP_SR, r5), r0
+ ldc r0, ssr
- /* get mode flags */
- mov.l saved_mode, k0
+ /* restore sp */
+ mov.l @(SH_SLEEP_SP, r5), r15
-done_sleep:
- /* reset standby mode to sleep mode */
- mov.l 5f, k4
- mov #0x00, k1
- mov.l k1, @k4
+ /* restore sleep mode register */
+ bsr restore_register
+ mov #SH_SLEEP_REG_STBCR, r0
- tst #SUSP_SH_SF, k0
+ /* call self-refresh resume code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_SF, r0
bt skip_restore_sf
-#ifdef CONFIG_CPU_SUBTYPE_SH7724
- /* DBSC: put memory in auto-refresh mode */
- mov.l dbrfpdn0_reg, k4
- mov.l dbrfpdn0_data0, k1
- mov.l k1, @k4
-
- nop /* sleep 140 ns */
- nop
- nop
- nop
-
- mov.l dbcmdcnt_reg, k4
- mov.l dbcmdcnt_data0, k1
- mov.l k1, @k4
-
- mov.l dbcmdcnt_reg, k4
- mov.l dbcmdcnt_data1, k1
- mov.l k1, @k4
-
- mov.l dben_reg, k4
- mov.l dben_data1, k1
- mov.l k1, @k4
-
- mov.l dbrfpdn0_reg, k4
- mov.l dbrfpdn0_data2, k1
- mov.l k1, @k4
-#else
- /* SBSC: set auto-refresh mode */
- mov.l 1f, k4
- mov.l @k4, k0
- mov.l 4f, k1
- and k1, k0
- mov.l k0, @k4
- mov.l 6f, k4
- mov.l 8f, k0
- mov.l @k4, k1
- mov #-1, k4
- add k4, k1
- or k1, k0
- mov.l 7f, k1
- mov.l k0, @k1
-#endif
+ mov.l @(SH_SLEEP_SF_POST, r5), r0
+ jsr @r0
+ nop
+
skip_restore_sf:
- /* jump to vbr vector */
- mov.l saved_vbr, k0
- mov.l offset_vbr, k4
- add k4, k0
- jmp @k0
+ /* restore mmu and cache state if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_restore_mmu
+
+ /* restore mmu state */
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PASCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ icbi @r0
+
+ /* restore cache settings */
+ bsr restore_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+ icbi @r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_CCR, r0
+ icbi @r0
+
+skip_restore_mmu:
+ rte
nop
- .balign 4
-saved_mode: .long 0
-saved_spc: .long 0
-saved_sr: .long 0
-saved_vbr: .long 0
-offset_vbr: .long 0x600
-#ifdef CONFIG_CPU_SUBTYPE_SH7724
-dben_reg: .long 0xfd000010 /* DBEN */
-dben_data0: .long 0
-dben_data1: .long 1
-dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */
-dbrfpdn0_data0: .long 0
-dbrfpdn0_data1: .long 1
-dbrfpdn0_data2: .long 0x00010000
-dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */
-dbcmdcnt_data0: .long 2
-dbcmdcnt_data1: .long 4
-#else
-1: .long 0xfe400008 /* SDCR0 */
-2: .long 0x00000400
-3: .long 0xffff7fff
-4: .long 0xfffffbff
-#endif
-5: .long 0xa4150020 /* STBCR */
-6: .long 0xfe40001c /* RTCOR */
-7: .long 0xfe400018 /* RTCNT */
-8: .long 0xa55a0000
-
-
-/* interrupt vector @ 0x600 */
- .balign 0x400,0,0x400
- .long 0xdeadbeef
- .balign 0x200,0,0x200
- bra restore_jump_vbr
+restore_register:
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_DATA, r0
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ mov.l r1, @r0
+ rts
nop
-sh_mobile_standby_end:
-ENTRY(sh_mobile_standby_size)
- .long sh_mobile_standby_end - sh_mobile_standby
+ .balign 4
+1: .long ~0x7ff
+ENTRY(sh_mobile_sleep_resume_end)
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
deleted file mode 100644
index 81923079fa1..00000000000
--- a/arch/sh/kernel/cpu/ubc.S
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * arch/sh/kernel/cpu/ubc.S
- *
- * Set of management routines for the User Break Controller (UBC)
- *
- * Copyright (C) 2002 Paul Mundt
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/linkage.h>
-#include <asm/ubc.h>
-
-#define STBCR2 0xffc00010
-
-ENTRY(ubc_sleep)
- mov #0, r0
-
- mov.l 1f, r1 ! Zero out UBC_BBRA ..
- mov.w r0, @r1
-
- mov.l 2f, r1 ! .. same for BBRB ..
- mov.w r0, @r1
-
- mov.l 3f, r1 ! .. and again for BRCR.
- mov.w r0, @r1
-
- mov.w @r1, r0 ! Dummy read BRCR
-
- mov.l 4f, r1 ! Set MSTP5 in STBCR2
- mov.b @r1, r0
- or #0x01, r0
- mov.b r0, @r1
-
- mov.b @r1, r0 ! Two dummy reads ..
- mov.b @r1, r0
-
- rts
- nop
-
-ENTRY(ubc_wakeup)
- mov.l 4f, r1 ! Clear MSTP5
- mov.b @r1, r0
- and #0xfe, r0
- mov.b r0, @r1
-
- mov.b @r1, r0 ! Two more dummy reads ..
- mov.b @r1, r0
-
- rts
- nop
-
-1: .long UBC_BBRA
-2: .long UBC_BBRB
-3: .long UBC_BRCR
-4: .long STBCR2
-
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 00000000000..3c55b87f8b6
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,82 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t addr = page_to_phys(page) + offset;
+
+ WARN_ON(size == 0);
+ dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+ return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ BUG_ON(!sg_page(s));
+
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+ s->dma_address = sg_phys(s);
+ s->dma_length = s->length;
+ }
+
+ return nents;
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nelems, i)
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+#endif
+
+struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = dma_generic_free_coherent,
+ .map_page = nommu_map_page,
+ .map_sg = nommu_map_sg,
+#ifdef CONFIG_DMA_NONCOHERENT
+ .sync_single_for_device = nommu_sync_single,
+ .sync_sg_for_device = nommu_sync_sg,
+#endif
+ .is_phys = 1,
+};
+
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ dma_ops = &nommu_dma_ops;
+}
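
Drivers reach these operations through the generic DMA API rather than by calling nommu_dma_ops directly. A brief hypothetical driver fragment (illustrative only, not part of this patch) showing a streaming scatterlist mapping against these ops:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* map a scatterlist for device-bound DMA, then tear it down again */
	static int mydrv_push(struct device *dev, struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		int i, mapped;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (!mapped)
			return -ENOMEM;

		for_each_sg(sgl, sg, mapped, i) {
			/* program the DMA controller with sg_dma_address(sg)
			 * and sg_dma_len(sg) here */
		}

		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}
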
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d76a23170db..3576b709f05 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
+#include <linux/elf.h>
#include <linux/ftrace.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
@@ -530,7 +531,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
}
/**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+ dwarf_frame_free_regs(frame);
+ mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
* @pc: address of the function to unwind
* @prev: struct dwarf_frame of the previous stackframe on the callstack
*
@@ -548,9 +560,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
unsigned long addr;
/*
- * If this is the first invocation of this recursive function we
- * need get the contents of a physical register to get the CFA
- * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need to get the
+ * contents of a physical register to get the CFA in order to
+ * begin the virtual unwinding of the stack.
*
* NOTE: the return address is guaranteed to be setup by the
* time this function makes its first function call.
@@ -593,9 +605,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
fde = dwarf_lookup_fde(pc);
if (!fde) {
/*
- * This is our normal exit path - the one that stops the
- * recursion. There's two reasons why we might exit
- * here,
+ * This is our normal exit path. There are two reasons
+ * why we might exit here,
*
* a) pc has no asscociated DWARF frame info and so
* we don't know how to unwind this frame. This is
@@ -637,10 +648,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
} else {
/*
- * Again, this is the first invocation of this
- * recurisve function. We need to physically
- * read the contents of a register in order to
- * get the Canonical Frame Address for this
+ * Again, we're starting from the top of the
+ * stack. We need to physically read
+ * the contents of a register in order to get
+ * the Canonical Frame Address for this
* function.
*/
frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -670,13 +681,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
return frame;
bail:
- dwarf_frame_free_regs(frame);
- mempool_free(frame, dwarf_frame_pool);
+ dwarf_free_frame(frame);
return NULL;
}
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
- unsigned char *end)
+ unsigned char *end, struct module *mod)
{
struct dwarf_cie *cie;
unsigned long flags;
@@ -772,6 +782,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
cie->initial_instructions = p;
cie->instructions_end = end;
+ cie->mod = mod;
+
/* Add to list */
spin_lock_irqsave(&dwarf_cie_lock, flags);
list_add_tail(&cie->link, &dwarf_cie_list);
@@ -782,7 +794,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
static int dwarf_parse_fde(void *entry, u32 entry_type,
void *start, unsigned long len,
- unsigned char *end)
+ unsigned char *end, struct module *mod)
{
struct dwarf_fde *fde;
struct dwarf_cie *cie;
@@ -831,6 +843,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
fde->instructions = p;
fde->end = end;
+ fde->mod = mod;
+
/* Add to list. */
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_add_tail(&fde->link, &dwarf_fde_list);
@@ -854,10 +868,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
while (1) {
frame = dwarf_unwind_stack(return_addr, _frame);
- if (_frame) {
- dwarf_frame_free_regs(_frame);
- mempool_free(_frame, dwarf_frame_pool);
- }
+ if (_frame)
+ dwarf_free_frame(_frame);
_frame = frame;
@@ -867,6 +879,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
return_addr = frame->return_addr;
ops->address(data, return_addr, 1);
}
+
+ if (frame)
+ dwarf_free_frame(frame);
}
static struct unwinder dwarf_unwinder = {
@@ -896,48 +911,28 @@ static void dwarf_unwinder_cleanup(void)
}
/**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
*
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
*/
-static int __init dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+ struct module *mod)
{
u32 entry_type;
void *p, *entry;
int count, err = 0;
- unsigned long len;
+ unsigned long len = 0;
unsigned int c_entries, f_entries;
unsigned char *end;
- INIT_LIST_HEAD(&dwarf_cie_list);
- INIT_LIST_HEAD(&dwarf_fde_list);
c_entries = 0;
f_entries = 0;
- entry = &__start_eh_frame;
-
- dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
- sizeof(struct dwarf_frame), 0,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
- dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
- sizeof(struct dwarf_reg), 0,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+ entry = eh_frame_start;
- dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_frame_cachep);
-
- dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_reg_cachep);
-
- while ((char *)entry < __stop_eh_frame) {
+ while ((char *)entry < eh_frame_end) {
p = entry;
count = dwarf_entry_len(p, &len);
@@ -949,6 +944,7 @@ static int __init dwarf_unwinder_init(void)
* entry and move to the next one because 'len'
* tells us where our next entry is.
*/
+ err = -EINVAL;
goto out;
} else
p += count;
@@ -960,13 +956,14 @@ static int __init dwarf_unwinder_init(void)
p += 4;
if (entry_type == DW_EH_FRAME_CIE) {
- err = dwarf_parse_cie(entry, p, len, end);
+ err = dwarf_parse_cie(entry, p, len, end, mod);
if (err < 0)
goto out;
else
c_entries++;
} else {
- err = dwarf_parse_fde(entry, entry_type, p, len, end);
+ err = dwarf_parse_fde(entry, entry_type, p, len,
+ end, mod);
if (err < 0)
goto out;
else
@@ -979,6 +976,129 @@ static int __init dwarf_unwinder_init(void)
printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
c_entries, f_entries);
+ return 0;
+
+out:
+ return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ unsigned int i, err;
+ unsigned long start, end;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ start = end = 0;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ /* Alloc bit cleared means "ignore it." */
+ if ((sechdrs[i].sh_flags & SHF_ALLOC)
+ && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+ start = sechdrs[i].sh_addr;
+ end = start + sechdrs[i].sh_size;
+ break;
+ }
+ }
+
+ /* Did we find the .eh_frame section? */
+ if (i != hdr->e_shnum) {
+ err = dwarf_parse_section((char *)start, (char *)end, me);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+ me->name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+ list_for_each_entry(cie, &dwarf_cie_list, link) {
+ if (cie->mod == mod)
+ break;
+ }
+
+ if (&cie->link != &dwarf_cie_list) {
+ list_del(&cie->link);
+ kfree(cie);
+ goto again_cie;
+ }
+
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+ list_for_each_entry(fde, &dwarf_fde_list, link) {
+ if (fde->mod == mod)
+ break;
+ }
+
+ if (&fde->link != &dwarf_fde_list) {
+ list_del(&fde->link);
+ kfree(fde);
+ goto again_fde;
+ }
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+ int err;
+ INIT_LIST_HEAD(&dwarf_cie_list);
+ INIT_LIST_HEAD(&dwarf_fde_list);
+
+ dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+ sizeof(struct dwarf_frame), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+ sizeof(struct dwarf_reg), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_frame_cachep);
+
+ dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_reg_cachep);
+
+ err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+ if (err)
+ goto out;
+
err = unwinder_register(&dwarf_unwinder);
if (err)
goto out;
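
module_dwarf_finalize() and module_dwarf_cleanup() above are intended to be invoked from the architecture's module loader at load and unload time. A rough sketch of that glue is shown below; the exact surrounding code lives outside this hunk, so treat it as an assumption:

	/* sketch of arch module-loader glue wiring in the DWARF hooks;
	 * the other arch-specific fixups are elided. */
	#include <linux/module.h>
	#include <linux/elf.h>
	#include <asm/dwarf.h>	/* assumed to declare the module_dwarf_* hooks */

	int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			    struct module *me)
	{
		/* ... other arch-specific finalization ... */
		return module_dwarf_finalize(hdr, sechdrs, me);
	}

	void module_arch_cleanup(struct module *mod)
	{
		module_dwarf_cleanup(mod);
	}
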
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 3eb84931d2a..f0abd58c3a6 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -133,7 +133,7 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+ tst #_TIF_SIGPENDING, r0
work_notifysig:
bt/s __restore_all
mov r15, r4
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 2c48e267256..b6f41c109be 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
return ftrace_replaced_code;
}
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ * and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ * we are modifying code, it is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status; /* holds return value of text write */
+static void *mod_code_ip; /* holds the IP to write to */
+static void *mod_code_newcode; /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+ int r;
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+ atomic_read(&nmi_update_count));
+ return r;
+}
+
+static void clear_mod_flag(void)
+{
+ int old = atomic_read(&nmi_running);
+
+ for (;;) {
+ int new = old & ~MOD_CODE_WRITE_FLAG;
+
+ if (old == new)
+ break;
+
+ old = atomic_cmpxchg(&nmi_running, old, new);
+ }
+}
+
+static void ftrace_mod_code(void)
+{
+ /*
+ * Yes, more than one CPU process can be writing to mod_code_status.
+ * (and the code itself)
+ * But if one were to fail, then they all should, and if one were
+ * to succeed, then they all should.
+ */
+ mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+ MCOUNT_INSN_SIZE);
+
+ /* if we fail, then kill any new writers */
+ if (mod_code_status)
+ clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
+ ftrace_mod_code();
+ atomic_inc(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+ /* Finish all executions before clearing nmi_running */
+ smp_mb();
+ atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+ if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+ nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+ if (!atomic_read(&nmi_running))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_read(&nmi_running));
+
+ nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+ mod_code_ip = (void *)ip;
+ mod_code_newcode = new_code;
+
+ /* The buffers need to be visible before we let NMIs write them */
+ smp_mb();
+
+ wait_for_nmi_and_set_mod_flag();
+
+ /* Make sure all running NMIs have finished before we write the code */
+ smp_mb();
+
+ ftrace_mod_code();
+
+ /* Make sure the write happens before clearing the bit */
+ smp_mb();
+
+ clear_mod_flag();
+ wait_for_nmi();
+
+ return mod_code_status;
+}
+
static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return -EINVAL;
/* replace the text with the new text */
- if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+ if (do_ftrace_mod_code(ip, new_code))
return -EPERM;
flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index a78be74b8d3..1151ecdffa7 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page)
.long 1 /* LOADER_TYPE */
.long 0x00000000 /* INITRD_START */
.long 0x00000000 /* INITRD_SIZE */
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
.long 0x53453f00 + 29 /* "SE?" = 29 bit */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c..aaff0037fcd 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -21,7 +21,7 @@
#include <asm/atomic.h>
static int hlt_counter;
-void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -39,48 +39,92 @@ static int __init hlt_setup(char *__unused)
}
__setup("hlt", hlt_setup);
+static inline int hlt_works(void)
+{
+ return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
+}
+
void default_idle(void)
{
- if (!hlt_counter) {
+ if (hlt_works()) {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
- set_bl_bit();
- stop_critical_timings();
- while (!need_resched())
+ if (!need_resched()) {
+ local_irq_enable();
cpu_sleep();
+ } else
+ local_irq_enable();
- start_critical_timings();
- clear_bl_bit();
set_thread_flag(TIF_POLLING_NRFLAG);
} else
- while (!need_resched())
- cpu_relax();
+ poll_idle();
}
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
void cpu_idle(void)
{
+ unsigned int cpu = smp_processor_id();
+
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
+ tick_nohz_stop_sched_tick(1);
- if (!idle)
- idle = default_idle;
+ while (!need_resched() && cpu_online(cpu)) {
+ check_pgt_cache();
+ rmb();
- tick_nohz_stop_sched_tick(1);
- while (!need_resched())
- idle();
- tick_nohz_restart_sched_tick();
+ local_irq_disable();
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ pm_idle();
+ /*
+ * Sanity check to ensure that pm_idle() returns
+ * with IRQs enabled
+ */
+ WARN_ON(irqs_disabled());
+ start_critical_timings();
+ }
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
- check_pgt_cache();
}
}
+void __cpuinit select_idle_routine(void)
+{
+ /*
+ * If a platform has set its own idle routine, leave it alone.
+ */
+ if (pm_idle)
+ return;
+
+ if (hlt_works())
+ pm_idle = default_idle;
+ else
+ pm_idle = poll_idle;
+}
+
static void do_nothing(void *unused)
{
}
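The idle.c rework above ends up with a common pattern: pm_idle is a function pointer that a platform may pre-set, and select_idle_routine() only fills it in when nobody has, choosing between a sleeping loop and a polling loop depending on whether "hlt" was disabled on the command line. The fragment below is a minimal plain-C sketch of that selection pattern; the printed strings merely describe what the real routines (cpu_sleep(), polling need_resched()) would do.

/* idle_select_sketch.c - boot-time selection of an idle routine.
 * A plain C illustration of the pm_idle pattern; not kernel code. */
#include <stdio.h>

static int hlt_counter;                 /* bumped by a "hlt" boot parameter */
static void (*pm_idle)(void);           /* a platform may set this first */

static int hlt_works(void)
{
        return !hlt_counter;
}

static void default_idle(void)
{
        puts("sleep until the next interrupt"); /* cpu_sleep() in the real code */
}

static void poll_idle(void)
{
        puts("spin, polling need_resched()");   /* faster wakeup, more power */
}

/* Leave a platform-provided routine alone; otherwise pick one. */
static void select_idle_routine(void)
{
        if (pm_idle)
                return;
        pm_idle = hlt_works() ? default_idle : poll_idle;
}

int main(void)
{
        select_idle_routine();
        pm_idle();              /* cpu_idle() would call this in a loop */
        return 0;
}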
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index b8fa6524760..e1e1dbd1955 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -24,7 +24,7 @@
#define dummy_read()
#endif
-unsigned long generic_io_base;
+unsigned long generic_io_base = 0;
u8 generic_inb(unsigned long port)
{
@@ -147,8 +147,10 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
{
+#ifdef P1SEG
if (PXSEG(addr) >= P1SEG)
return (void __iomem *)addr;
+#endif
return (void __iomem *)(addr + generic_io_base);
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index eac7da772fc..e1913f28f41 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -37,7 +37,15 @@ void ack_bad_irq(unsigned int irq)
*/
static int show_other_interrupts(struct seq_file *p, int prec)
{
+ int j;
+
+ seq_printf(p, "%*s: ", prec, "NMI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+ seq_printf(p, " Non-maskable interrupts\n");
+
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+
return 0;
}
@@ -255,6 +263,12 @@ void __init init_IRQ(void)
{
plat_irq_setup();
+ /*
+ * Pin any of the legacy IRQ vectors that haven't already been
+ * grabbed by the platform
+ */
+ reserve_irq_legacy();
+
/* Perform the machine specific initialisation */
if (sh_mv.mv_init_irq)
sh_mv.mv_init_irq();
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
new file mode 100644
index 00000000000..e33ab15831f
--- /dev/null
+++ b/arch/sh/kernel/irq_32.c
@@ -0,0 +1,57 @@
+/*
+ * SHcompact irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+
+void notrace raw_local_irq_restore(unsigned long flags)
+{
+ unsigned long __dummy0, __dummy1;
+
+ if (flags == RAW_IRQ_DISABLED) {
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "or #0xf0, %0\n\t"
+ "ldc %0, sr\n\t"
+ : "=&z" (__dummy0)
+ : /* no inputs */
+ : "memory"
+ );
+ } else {
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and %1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+ "stc r6_bank, %1\n\t"
+ "or %1, %0\n\t"
+#endif
+ "ldc %0, sr\n\t"
+ : "=&r" (__dummy0), "=r" (__dummy1)
+ : "1" (~RAW_IRQ_DISABLED)
+ : "memory"
+ );
+ }
+}
+EXPORT_SYMBOL(raw_local_irq_restore);
+
+unsigned long notrace __raw_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and #0xf0, %0\n\t"
+ : "=&z" (flags)
+ : /* no inputs */
+ : "memory"
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(__raw_local_save_flags);
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
new file mode 100644
index 00000000000..32365ba0e03
--- /dev/null
+++ b/arch/sh/kernel/irq_64.c
@@ -0,0 +1,51 @@
+/*
+ * SHmedia irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <cpu/registers.h>
+
+void notrace raw_local_irq_restore(unsigned long flags)
+{
+ unsigned long long __dummy;
+
+ if (flags == RAW_IRQ_DISABLED) {
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (RAW_IRQ_DISABLED)
+ );
+ } else {
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (~RAW_IRQ_DISABLED)
+ );
+ }
+}
+EXPORT_SYMBOL(raw_local_irq_restore);
+
+unsigned long notrace __raw_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0"
+ : "=&r" (flags)
+ : "r" (RAW_IRQ_DISABLED)
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(__raw_local_save_flags);
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 7ea2704ea03..76f280223eb 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -46,12 +46,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
*/
int machine_kexec_prepare(struct kimage *image)
{
- /* older versions of kexec-tools are passing
- * the zImage entry point as a virtual address.
- */
- if (image->start != PHYSADDR(image->start))
- return -EINVAL; /* upgrade your kexec-tools */
-
return 0;
}
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index cbce639b108..1652340ba3f 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -135,5 +135,9 @@ void __init sh_mv_setup(void)
if (!sh_mv.mv_nr_irqs)
sh_mv.mv_nr_irqs = NR_IRQS;
+#ifdef P2SEG
__set_io_port_base(P2SEG);
+#else
+ __set_io_port_base(0);
+#endif
}
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index c2efdcde266..43adddfe4c0 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
+#include <asm/dwarf.h>
void *module_alloc(unsigned long size)
{
@@ -145,10 +146,16 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- return module_bug_finalize(hdr, sechdrs, me);
+ int ret = 0;
+
+ ret |= module_dwarf_finalize(hdr, sechdrs, me);
+ ret |= module_bug_finalize(hdr, sechdrs, me);
+
+ return ret;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
+ module_dwarf_cleanup(mod);
}
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
new file mode 100644
index 00000000000..24ea837eac5
--- /dev/null
+++ b/arch/sh/kernel/perf_callchain.c
@@ -0,0 +1,98 @@
+/*
+ * Performance event callchain support - SuperH architecture code
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <asm/unwinder.h>
+#include <asm/ptrace.h>
+
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+static void callchain_warning(void *data, char *msg)
+{
+}
+
+static void
+callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
+static int callchain_stack(void *data, char *name)
+{
+ return 0;
+}
+
+static void callchain_address(void *data, unsigned long addr, int reliable)
+{
+ struct perf_callchain_entry *entry = data;
+
+ if (reliable)
+ callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops callchain_ops = {
+ .warning = callchain_warning,
+ .warning_symbol = callchain_warning_symbol,
+ .stack = callchain_stack,
+ .address = callchain_address,
+};
+
+static void
+perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->pc);
+
+ unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || current->pid == 0)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ /*
+ * Only the kernel side is implemented for now.
+ */
+ if (!is_user)
+ perf_callchain_kernel(regs, entry);
+}
+
+/*
+ * No need for separate IRQ and NMI entries.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+ entry->nr = 0;
+
+ perf_do_callchain(regs, entry);
+
+ return entry;
+}
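perf_callchain.c above hooks perf into the existing unwinder by handing unwind_stack() a table of callbacks (stacktrace_ops); the only callback that does real work stores each reliable return address into a bounded perf_callchain_entry. The sketch below shows the same callback-table idea in isolation, with a fake walker standing in for the DWARF unwinder; the frame addresses and names are invented for the example.

/* callchain_sketch.c - filling a bounded entry via a callback table. */
#include <stdio.h>

#define MAX_STACK_DEPTH 8

struct callchain_entry {
        unsigned nr;
        unsigned long ip[MAX_STACK_DEPTH];
};

struct stacktrace_ops {
        /* called once per frame; 'reliable' mirrors the unwinder's confidence */
        void (*address)(void *data, unsigned long addr, int reliable);
};

static void callchain_store(struct callchain_entry *entry, unsigned long ip)
{
        if (entry->nr < MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

static void callchain_address(void *data, unsigned long addr, int reliable)
{
        if (reliable)
                callchain_store(data, addr);
}

/* Stand-in for unwind_stack(): walks a made-up stack and reports frames. */
static void fake_unwind(const struct stacktrace_ops *ops, void *data)
{
        static const unsigned long frames[] = { 0x8c0010a0, 0x8c004f20, 0x8c00e884 };

        for (unsigned i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
                ops->address(data, frames[i], 1);
}

int main(void)
{
        static const struct stacktrace_ops ops = { .address = callchain_address };
        struct callchain_entry entry = { 0 };

        fake_unwind(&ops, &entry);
        for (unsigned i = 0; i < entry.nr; i++)
                printf("frame %u: %#lx\n", i, entry.ip[i]);
        return 0;
}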
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
new file mode 100644
index 00000000000..7ff0943e7a0
--- /dev/null
+++ b/arch/sh/kernel/perf_event.c
@@ -0,0 +1,312 @@
+/*
+ * Performance event support framework for SuperH hardware counters.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Heavily based on the x86 and PowerPC implementations.
+ *
+ * x86:
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ *
+ * ppc:
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+};
+
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static struct sh_pmu *sh_pmu __read_mostly;
+
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Stub these out for now, do something more profound later.
+ */
+int reserve_pmc_hardware(void)
+{
+ return 0;
+}
+
+void release_pmc_hardware(void)
+{
+}
+
+static inline int sh_pmu_initialized(void)
+{
+ return !!sh_pmu;
+}
+
+/*
+ * Release the PMU if this is the last perf_event.
+ */
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+static int hw_perf_cache_event(int config, int *evp)
+{
+ unsigned long type, op, result;
+ int ev;
+
+ if (!sh_pmu->cache_events)
+ return -EINVAL;
+
+ /* unpack config */
+ type = config & 0xff;
+ op = (config >> 8) & 0xff;
+ result = (config >> 16) & 0xff;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ev = (*sh_pmu->cache_events)[type][op][result];
+ if (ev == 0)
+ return -EOPNOTSUPP;
+ if (ev == -1)
+ return -EINVAL;
+ *evp = ev;
+ return 0;
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ int config = -1;
+ int err;
+
+ if (!sh_pmu_initialized())
+ return -ENODEV;
+
+ /*
+ * All of the on-chip counters are "limited", in that they have
+ * no interrupts, and are therefore unable to do sampling without
+ * further work and timer assistance.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * See if we need to reserve the counter.
+ *
+ * If no events are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task doing
+ * reserve_pmc_hardware or release_pmc_hardware.
+ */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 &&
+ reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ event->destroy = hw_perf_event_destroy;
+
+ switch (attr->type) {
+ case PERF_TYPE_RAW:
+ config = attr->config & sh_pmu->raw_event_mask;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ err = hw_perf_cache_event(attr->config, &config);
+ if (err)
+ return err;
+ break;
+ case PERF_TYPE_HARDWARE:
+ if (attr->config >= sh_pmu->max_events)
+ return -EINVAL;
+
+ config = sh_pmu->event_map(attr->config);
+ break;
+ }
+
+ if (config == -1)
+ return -EINVAL;
+
+ hwc->config |= config;
+
+ return 0;
+}
+
+static void sh_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
+ int shift = 0;
+
+ /*
+ * Depending on the counter configuration, they may or may not
+ * be chained, in which case the previous counter value can be
+ * updated underneath us if the lower-half overflows.
+ *
+ * Our tactic to handle this is to first atomically read and
+ * exchange a new raw count - then add that new-prev delta
+ * count to the generic counter atomically.
+ *
+ * As there is no interrupt associated with the overflow events,
+ * this is the simplest approach for maintaining consistency.
+ */
+again:
+ prev_raw_count = atomic64_read(&hwc->prev_count);
+ new_raw_count = sh_pmu->read(idx);
+
+ if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (counter-)time and add that to the generic counter.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ atomic64_add(delta, &event->count);
+}
+
+static void sh_pmu_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ clear_bit(idx, cpuc->active_mask);
+ sh_pmu->disable(hwc, idx);
+
+ barrier();
+
+ sh_perf_event_update(event, &event->hw, idx);
+
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int sh_pmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (test_and_set_bit(idx, cpuc->used_mask)) {
+ idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
+ if (idx == sh_pmu->num_events)
+ return -EAGAIN;
+
+ set_bit(idx, cpuc->used_mask);
+ hwc->idx = idx;
+ }
+
+ sh_pmu->disable(hwc, idx);
+
+ cpuc->events[idx] = event;
+ set_bit(idx, cpuc->active_mask);
+
+ sh_pmu->enable(hwc, idx);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void sh_pmu_read(struct perf_event *event)
+{
+ sh_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static const struct pmu pmu = {
+ .enable = sh_pmu_enable,
+ .disable = sh_pmu_disable,
+ .read = sh_pmu_read,
+};
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = __hw_perf_event_init(event);
+ if (unlikely(err)) {
+ if (event->destroy)
+ event->destroy(event);
+ return ERR_PTR(err);
+ }
+
+ return &pmu;
+}
+
+void hw_perf_event_setup(int cpu)
+{
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+ memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+}
+
+void hw_perf_enable(void)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->enable_all();
+}
+
+void hw_perf_disable(void)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->disable_all();
+}
+
+int register_sh_pmu(struct sh_pmu *pmu)
+{
+ if (sh_pmu)
+ return -EBUSY;
+ sh_pmu = pmu;
+
+ pr_info("Performance Events: %s support registered\n", pmu->name);
+
+ WARN_ON(pmu->num_events > MAX_HWEVENTS);
+
+ return 0;
+}
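The core of perf_event.c above is sh_perf_event_update(): the counters have no overflow interrupt, so the previously accounted raw value is swapped against a fresh hardware read with a compare-and-exchange loop, and only the observed delta is added to the event total, which keeps concurrent updaters from double-counting. The standalone C11 fragment below replays that read/cmpxchg/accumulate step with a plain variable standing in for sh_pmu->read(); the names are invented for the example.

/* counter_update_sketch.c - cmpxchg-based delta accumulation.
 * Build: cc -std=c11 counter_update_sketch.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ullong prev_count;        /* last raw value already accounted */
static atomic_ullong event_count;       /* running total seen by the event */
static unsigned long long hw_counter;   /* stands in for sh_pmu->read(idx) */

static void event_update(void)
{
        unsigned long long prev, new;

        /* Retry until nobody else swaps prev_count under us; whoever wins
         * the exchange owns exactly the delta it observed. */
        do {
                prev = atomic_load(&prev_count);
                new = hw_counter;
        } while (!atomic_compare_exchange_strong(&prev_count, &prev, new));

        atomic_fetch_add(&event_count, new - prev);
}

int main(void)
{
        hw_counter = 100;
        event_update();
        hw_counter = 250;
        event_update();
        printf("accumulated: %llu\n", atomic_load(&event_count));      /* 250 */
        return 0;
}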
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be..d8af889366a 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
regs.regs[5] = (unsigned long)fn;
regs.pc = (unsigned long)kernel_thread_helper;
- regs.sr = (1 << 30);
+ regs.sr = SR_MD;
+#if defined(CONFIG_SH_FPU)
+ regs.sr |= SR_FD;
+#endif
/* Ok, create the new process.. */
pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -142,6 +145,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
return pid;
}
+EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
@@ -186,6 +190,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return fpvalid;
}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+}
asmlinkage void ret_from_fork(void);
@@ -195,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
+#if defined(CONFIG_SH_DSP)
struct task_struct *tsk = current;
#endif
-#if defined(CONFIG_SH_FPU)
- unlazy_fpu(tsk, regs);
- p->thread.fpu = tsk->thread.fpu;
- copy_to_stopped_child_used_math(p);
-#endif
-
#if defined(CONFIG_SH_DSP)
if (is_dsp_enabled(tsk)) {
/* We can use the __save_dsp or just copy the struct:
@@ -224,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
} else {
childregs->regs[15] = (unsigned long)childregs;
ti->addr_limit = KERNEL_DS;
+ ti->status &= ~TS_USEDFPU;
+ p->fpu_counter = 0;
}
if (clone_flags & CLONE_SETTLS)
@@ -288,9 +298,13 @@ static void ubc_set_tracing(int asid, unsigned long pc)
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
-#if defined(CONFIG_SH_FPU)
+ struct thread_struct *next_t = &next->thread;
+
unlazy_fpu(prev, task_pt_regs(prev));
-#endif
+
+ /* we're going to use this soon, after a few expensive things */
+ if (next->fpu_counter > 5)
+ prefetch(&next_t->fpu.hard);
#ifdef CONFIG_MMU
/*
@@ -321,6 +335,14 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
#endif
}
+ /*
+ * If the task has used the FPU in the last 5 timeslices, just do a
+ * full restore of the math state immediately to avoid the trap; the
+ * chances of needing the FPU again soon are obviously high now.
+ */
+ if (next->fpu_counter > 5)
+ fpu_state_restore(task_pt_regs(next));
+
return prev;
}
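The __switch_to() change above adopts the x86-style lazy-FPU heuristic: the outgoing task's FPU state is always saved, but the incoming task's state is reloaded eagerly only when its fpu_counter shows it has used the FPU in each of the last few timeslices; otherwise the reload is deferred until the first FPU trap. A toy model of that decision is sketched below (invented names, no real FPU state involved).

/* fpu_switch_sketch.c - lazy vs. eager FPU restore on context switch. */
#include <stdio.h>

struct task {
        const char *name;
        int fpu_counter;        /* consecutive timeslices that touched the FPU */
        int fpu_loaded;         /* is this task's FPU state in the registers? */
};

static void restore_fpu(struct task *t)
{
        t->fpu_loaded = 1;
        printf("%s: FPU state restored eagerly\n", t->name);
}

/* Called on every context switch, after the outgoing state is saved. */
static void switch_in(struct task *next)
{
        /* Heavy recent FPU use means another use is likely imminent, so pay
         * the restore cost now instead of taking the "FPU disabled" trap. */
        if (next->fpu_counter > 5)
                restore_fpu(next);
        else
                printf("%s: restore deferred until first FPU use\n", next->name);
}

int main(void)
{
        struct task hog  = { "fpu-hog", 8, 0 };
        struct task idle = { "shell",   0, 0 };

        switch_in(&hog);
        switch_in(&idle);
        return 0;
}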
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 1192398ef58..359b8a2f4d2 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -335,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
&regs, 0, NULL, NULL);
}
+EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
@@ -417,6 +418,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return 0; /* Task didn't use the fpu at all. */
#endif
}
+EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
new file mode 100644
index 00000000000..df3ab581107
--- /dev/null
+++ b/arch/sh/kernel/return_address.c
@@ -0,0 +1,54 @@
+/*
+ * arch/sh/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Matt Fleming
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <asm/dwarf.h>
+
+#ifdef CONFIG_DWARF_UNWINDER
+
+void *return_address(unsigned int depth)
+{
+ struct dwarf_frame *frame;
+ unsigned long ra;
+ int i;
+
+ for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
+ struct dwarf_frame *tmp;
+
+ tmp = dwarf_unwind_stack(ra, frame);
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ frame = tmp;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ ra = frame->return_addr;
+ }
+
+ /* Failed to unwind the stack to the specified depth. */
+ WARN_ON(i != depth + 1);
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ return (void *)ra;
+}
+
+#else
+
+void *return_address(unsigned int depth)
+{
+ return NULL;
+}
+
+#endif
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 99b4fb553bf..5a947a2567e 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -453,6 +453,10 @@ void __init setup_arch(char **cmdline_p)
paging_init();
+#ifdef CONFIG_PMB_ENABLE
+ pmb_init();
+#endif
+
#ifdef CONFIG_SMP
plat_smp_setup();
#endif
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 444cce3ae92..3896f26efa4 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -1,37 +1,11 @@
#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <asm/sections.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/ftrace.h>
-
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(strlen);
-
-/* PCI exports */
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
+#include <asm/sections.h>
-/* mem exports */
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
@@ -40,6 +14,13 @@ EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(_ebss);
+EXPORT_SYMBOL(empty_zero_page);
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)
@@ -107,30 +88,6 @@ DECLARE_EXPORT(__sdivsi3_i4);
DECLARE_EXPORT(__udivsi3_i4);
DECLARE_EXPORT(__sdivsi3_i4i);
DECLARE_EXPORT(__udivsi3_i4i);
-
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
- defined(CONFIG_SH7705_CACHE_32KB))
-/* needed by some modules */
-EXPORT_SYMBOL(flush_cache_all);
-EXPORT_SYMBOL(flush_cache_range);
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
#ifdef CONFIG_MCOUNT
DECLARE_EXPORT(mcount);
#endif
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#ifdef CONFIG_IPV6
-EXPORT_SYMBOL(csum_ipv6_magic);
-#endif
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(__flush_purge_region);
-EXPORT_SYMBOL(__flush_wback_region);
-EXPORT_SYMBOL(__flush_invalidate_region);
-#endif
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index d008e17eb25..45afa5c51f6 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -24,16 +24,6 @@
#include <asm/delay.h>
#include <asm/irq.h>
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
EXPORT_SYMBOL(__put_user_asm_b);
EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 3db37425210..12815ce01ec 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -67,7 +67,8 @@ sys_sigsuspend(old_sigset_t mask,
current->state = TASK_INTERRUPTIBLE;
schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
+ set_restore_sigmask();
+
return -ERESTARTNOHAND;
}
@@ -590,7 +591,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
if (try_to_freeze())
goto no_signal;
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
@@ -602,12 +603,13 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
/* Whee! Actually deliver the signal. */
if (handle_signal(signr, &ka, &info, oldset,
regs, save_r0) == 0) {
- /* a signal was successfully delivered; the saved
+ /*
+ * A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
+ * clear the TS_RESTORE_SIGMASK flag
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs,
test_thread_flag(TIF_SINGLESTEP));
@@ -631,10 +633,12 @@ no_signal:
}
}
- /* if there's no signal to deliver, we just put the saved sigmask
- * back */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 74793c80a57..feb3dddd319 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -101,7 +101,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
if (try_to_freeze())
goto no_signal;
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
else if (!oldset)
oldset = &current->blocked;
@@ -115,11 +115,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
/*
* If a signal was successfully delivered, the
* saved sigmask is in its frame, and we can
- * clear the TIF_RESTORE_SIGMASK flag.
+ * clear the TS_RESTORE_SIGMASK flag.
*/
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
return 1;
}
@@ -146,8 +144,8 @@ no_signal:
}
/* No signal to deliver -- put the saved sigmask back */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
@@ -176,6 +174,7 @@ sys_sigsuspend(old_sigset_t mask,
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
+ set_restore_sigmask();
regs->pc += 4; /* because sys_sigreturn decrements the pc */
if (do_signal(regs, &saveset)) {
/* pc now points at signal handler. Need to decrement
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 160db1003cf..983e0792d5f 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -122,7 +122,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
stack_start.start_kernel_fn = start_secondary;
- flush_cache_all();
+ flush_icache_range((unsigned long)&stack_start,
+ (unsigned long)&stack_start + sizeof(stack_start));
+ wmb();
plat_start_cpu(cpu, (unsigned long)_stext);
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 0838942b708..9b0b633b6c9 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -16,6 +16,32 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
+cpumask_t cpu_core_map[NR_CPUS];
+
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+ /*
+ * Presently all SH-X3 SMP cores are multi-cores, so just keep it
+ * simple until we have a method for determining topology..
+ */
+ return cpu_possible_map;
+}
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
+int arch_update_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+ return 0;
+}
+
static int __init topology_init(void)
{
int i, ret;
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a8396f36bd1..7b036339dc9 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -9,8 +9,8 @@
#include <asm/unwinder.h>
#include <asm/system.h>
-#ifdef CONFIG_BUG
-void handle_BUG(struct pt_regs *regs)
+#ifdef CONFIG_GENERIC_BUG
+static void handle_BUG(struct pt_regs *regs)
{
const struct bug_entry *bug;
unsigned long bugaddr = regs->pc;
@@ -81,7 +81,7 @@ BUILD_TRAP_HANDLER(bug)
SIGTRAP) == NOTIFY_STOP)
return;
-#ifdef CONFIG_BUG
+#ifdef CONFIG_GENERIC_BUG
if (__kernel_text_address(instruction_pointer(regs))) {
insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
if (insn == TRAPA_BUG_OPCODE)
@@ -95,9 +95,11 @@ BUILD_TRAP_HANDLER(bug)
BUILD_TRAP_HANDLER(nmi)
{
+ unsigned int cpu = smp_processor_id();
TRAP_HANDLER_DECL;
nmi_enter();
+ nmi_count(cpu)++;
switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
case NOTIFY_OK:
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 7a2ee3a6b8e..3da5a125d88 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -25,6 +25,7 @@
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sysfs.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -68,61 +69,49 @@ static const char *se_usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", se_user);
- p += sprintf(p, "System:\t\t%lu\n", se_sys);
- p += sprintf(p, "Half:\t\t%lu\n", se_half);
- p += sprintf(p, "Word:\t\t%lu\n", se_word);
- p += sprintf(p, "DWord:\t\t%lu\n", se_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", se_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode,
+ seq_printf(m, "User:\t\t%lu\n", se_user);
+ seq_printf(m, "System:\t\t%lu\n", se_sys);
+ seq_printf(m, "Half:\t\t%lu\n", se_half);
+ seq_printf(m, "Word:\t\t%lu\n", se_word);
+ seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
se_usermode_action[se_usermode]);
- p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+ seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
se_kernmode_warn ? "+warn" : "");
-
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
-
- return len;
+ return 0;
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int alignment_proc_open(struct inode *inode, struct file *file)
{
- char mode;
-
- if (count > 0) {
- if (get_user(mode, buffer))
- return -EFAULT;
- if (mode >= '0' && mode <= '5')
- se_usermode = mode - '0';
- }
- return count;
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_kern_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
+ int *data = PDE(file->f_path.dentry->d_inode)->data;
char mode;
if (count > 0) {
if (get_user(mode, buffer))
return -EFAULT;
- if (mode >= '0' && mode <= '1')
- se_kernmode_warn = mode - '0';
+ if (mode >= '0' && mode <= '5')
+ *data = mode - '0';
}
return count;
}
+
+static const struct file_operations alignment_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
@@ -945,14 +934,9 @@ void __init trap_init(void)
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
- set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
- set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
-#else
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
-#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
@@ -1011,20 +995,16 @@ static int __init alignment_init(void)
if (!dir)
return -ENOMEM;
- res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir);
+ res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_fops, &se_usermode);
if (!res)
return -ENOMEM;
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
-
- res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir);
+ res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_fops, &se_kernmode_warn);
if (!res)
return -ENOMEM;
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_kern_write;
-
return 0;
}
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index a969b47c546..dab4d212981 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for SuperH-specific library files..
#
-lib-y = delay.o memset.o memmove.o memchr.o \
+lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o
# Extracted from libgcc
@@ -23,8 +23,11 @@ obj-y += io.o
memcpy-y := memcpy.o
memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o
+memset-y := memset.o
+memset-$(CONFIG_CPU_SH4) := memset-sh4.o
+
lib-$(CONFIG_MMU) += copy_page.o __clear_user.o
lib-$(CONFIG_MCOUNT) += mcount.o
-lib-y += $(memcpy-y) $(udivsi3-y)
+lib-y += $(memcpy-y) $(memset-y) $(udivsi3-y)
EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/lib/memset-sh4.S b/arch/sh/lib/memset-sh4.S
new file mode 100644
index 00000000000..1a6e32cc4e4
--- /dev/null
+++ b/arch/sh/lib/memset-sh4.S
@@ -0,0 +1,107 @@
+/*
+ * "memset" implementation for SH4
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (c) 2009 STMicroelectronics Limited
+ * Author: Stuart Menefy <stuart.menefy@st.com>
+ */
+
+/*
+ * void *memset(void *s, int c, size_t n);
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(memset)
+ mov #12,r0
+ add r6,r4
+ cmp/gt r6,r0
+ bt/s 40f ! if it's too small, set a byte at once
+ mov r4,r0
+ and #3,r0
+ cmp/eq #0,r0
+ bt/s 2f ! It's aligned
+ sub r0,r6
+1:
+ dt r0
+ bf/s 1b
+ mov.b r5,@-r4
+2: ! make VVVV
+ extu.b r5,r5
+ swap.b r5,r0 ! V0
+ or r0,r5 ! VV
+ swap.w r5,r0 ! VV00
+ or r0,r5 ! VVVV
+
+	! Check if enough bytes need to be set to be worth the big loop
+ mov #0x40, r0 ! (MT)
+ cmp/gt r6,r0 ! (MT) 64 > len => slow loop
+
+ bt/s 22f
+ mov r6,r0
+
+ ! align the dst to the cache block size if necessary
+ mov r4, r3
+ mov #~(0x1f), r1
+
+ and r3, r1
+ cmp/eq r3, r1
+
+ bt/s 11f ! dst is already aligned
+ sub r1, r3 ! r3-r1 -> r3
+ shlr2 r3 ! number of loops
+
+10: mov.l r5,@-r4
+ dt r3
+ bf/s 10b
+ add #-4, r6
+
+11: ! dst is 32byte aligned
+ mov r6,r2
+ mov #-5,r0
+ shld r0,r2 ! number of loops
+
+ add #-32, r4
+ mov r5, r0
+12:
+ movca.l r0,@r4
+ mov.l r5,@(4, r4)
+ mov.l r5,@(8, r4)
+ mov.l r5,@(12,r4)
+ mov.l r5,@(16,r4)
+ mov.l r5,@(20,r4)
+ add #-0x20, r6
+ mov.l r5,@(24,r4)
+ dt r2
+ mov.l r5,@(28,r4)
+ bf/s 12b
+ add #-32, r4
+
+ add #32, r4
+ mov #8, r0
+ cmp/ge r0, r6
+ bf 40f
+
+ mov r6,r0
+22:
+ shlr2 r0
+ shlr r0 ! r0 = r6 >> 3
+3:
+ dt r0
+ mov.l r5,@-r4 ! set 8-byte at once
+ bf/s 3b
+ mov.l r5,@-r4
+ !
+ mov #7,r0
+ and r0,r6
+
+ ! fill bytes (length may be zero)
+40: tst r6,r6
+ bt 5f
+4:
+ dt r6
+ bf/s 4b
+ mov.b r5,@-r4
+5:
+ rts
+ mov r4,r0
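memset-sh4.S above is hand-scheduled assembly, but its strategy is simple: fill single bytes until the destination is 4-byte aligned, replicate the fill byte into a 32-bit word (V -> VV -> VVVV), write words (with movca.l in cache-block-sized groups once at least 64 bytes remain), then mop up the tail a byte at a time. The plain C rendering below is a reading aid for that structure, not a replacement: it fills forwards where the assembly works backwards from s+n, collapses the unrolled loops into a single word loop, and ignores the movca.l cache-allocation trick.

/* memset_sketch.c - C rendering of the SH4 memset strategy. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void *memset_sketch(void *s, int c, size_t n)
{
        unsigned char *p = s;
        uint32_t word = (unsigned char)c;

        if (n < 12) {                   /* too small to bother aligning */
                while (n--)
                        *p++ = (unsigned char)c;
                return s;
        }

        while ((uintptr_t)p & 3) {      /* byte-fill up to 4-byte alignment */
                *p++ = (unsigned char)c;
                n--;
        }

        word |= word << 8;              /* 000V -> 00VV */
        word |= word << 16;             /* 00VV -> VVVV */

        while (n >= 4) {                /* word fill; the asm unrolls this
                                         * into 32-byte movca.l blocks */
                *(uint32_t *)p = word;
                p += 4;
                n -= 4;
        }

        while (n--)                     /* trailing bytes */
                *p++ = (unsigned char)c;

        return s;
}

int main(void)
{
        char buf[40], ref[40];

        memset_sketch(buf, 0xab, sizeof(buf));
        memset(ref, 0xab, sizeof(ref));
        printf("matches libc memset: %s\n",
               memcmp(buf, ref, sizeof(buf)) == 0 ? "yes" : "no");
        return 0;
}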
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index ac2d7abd256..d6c15cae091 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -558,7 +558,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
(finsn >> 8) & 0xf);
tsk->thread.fpu.hard.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
} else {
info.si_signo = SIGFPE;
info.si_errno = 0;
@@ -619,10 +619,10 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft);
- if (!test_tsk_thread_flag(tsk, TIF_USEDFPU)) {
+ if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
}
return fpu_emulate(inst, fpu, regs);
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 7f7b52f9beb..0e7ba8e891c 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,8 +82,7 @@ config 32BIT
config PMB_ENABLE
bool "Support 32-bit physical addressing through PMB"
- depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
- select 32BIT
+ depends on MMU && EXPERIMENTAL && CPU_SH4A
default y
help
If you say Y here, physical addressing will be extended to
@@ -97,8 +96,7 @@ choice
config PMB
bool "PMB"
- depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
- select 32BIT
+ depends on MMU && EXPERIMENTAL && CPU_SH4A
help
If you say Y here, physical addressing will be extended to
32-bits through the SH-4A PMB. If this is not set, legacy
@@ -106,9 +104,7 @@ config PMB
config PMB_FIXED
bool "fixed PMB"
- depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \
- CPU_SUBTYPE_SH7780 || \
- CPU_SUBTYPE_SH7785)
+ depends on MMU && EXPERIMENTAL && CPU_SH4A
select 32BIT
help
If this option is enabled, fixed PMB mappings are inherited
@@ -258,6 +254,15 @@ endchoice
source "mm/Kconfig"
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
endmenu
menu "Cache configuration"
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3759bf85329..8a70535fa7c 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -33,8 +33,7 @@ obj-y += $(tlb-y)
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PMB) += pmb.o
-obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o
+obj-$(CONFIG_PMB_ENABLE) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
# Special flags for fault_64.o. This puts restrictions on the number of
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b7f235c74d6..f36a08bf3d5 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@
* arch/sh/mm/cache-sh4.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2001 - 2007 Paul Mundt
+ * Copyright (C) 2001 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
* Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
*
@@ -15,6 +15,8 @@
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -23,21 +25,12 @@
* flushing. Anything exceeding this will simply flush the dcache in its
* entirety.
*/
-#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
#define MAX_ICACHE_PAGES 32
static void __flush_cache_one(unsigned long addr, unsigned long phys,
unsigned long exec_offset);
/*
- * This is initialised here to ensure that it is not placed in the BSS. If
- * that were to happen, note that cache_init gets called before the BSS is
- * cleared, so this would get nulled out which would be hopeless.
- */
-static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
- (void (*)(unsigned long, unsigned long))0xdeadbeef;
-
-/*
* Write back the range of D-cache, and purge the I-cache.
*
* Called from kernel/module.c:sys_init_module and routine for a.out format,
@@ -97,15 +90,15 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
unsigned long flags, exec_offset = 0;
/*
- * All types of SH-4 require PC to be in P2 to operate on the I-cache.
- * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
+ * All types of SH-4 require PC to be uncached to operate on the I-cache.
+ * Some types of SH-4 require PC to be uncached to operate on the D-cache.
*/
if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
(start < CACHE_OC_ADDRESS_ARRAY))
- exec_offset = 0x20000000;
+ exec_offset = cached_to_uncached;
local_irq_save(flags);
- __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
+ __flush_cache_one(start, phys, exec_offset);
local_irq_restore(flags);
}
@@ -124,7 +117,7 @@ static void sh4_flush_dcache_page(void *arg)
else
#endif
{
- unsigned long phys = PHYSADDR(page_address(page));
+ unsigned long phys = page_to_phys(page);
unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
int i, n;
@@ -159,10 +152,27 @@ static void __uses_jump_to_uncached flush_icache_all(void)
local_irq_restore(flags);
}
-static inline void flush_dcache_all(void)
+static void flush_dcache_all(void)
{
- (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
- wmb();
+ unsigned long addr, end_addr, entry_offset;
+
+ end_addr = CACHE_OC_ADDRESS_ARRAY +
+ (current_cpu_data.dcache.sets <<
+ current_cpu_data.dcache.entry_shift) *
+ current_cpu_data.dcache.ways;
+
+ entry_offset = 1 << current_cpu_data.dcache.entry_shift;
+
+ for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ }
}
static void sh4_flush_cache_all(void *unused)
@@ -171,89 +181,13 @@ static void sh4_flush_cache_all(void *unused)
flush_icache_all();
}
-static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
- unsigned long d = 0, p = start & PAGE_MASK;
- unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
- unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
- unsigned long select_bit;
- unsigned long all_aliases_mask;
- unsigned long addr_offset;
- pgd_t *dir;
- pmd_t *pmd;
- pud_t *pud;
- pte_t *pte;
- int i;
-
- dir = pgd_offset(mm, p);
- pud = pud_offset(dir, p);
- pmd = pmd_offset(pud, p);
- end = PAGE_ALIGN(end);
-
- all_aliases_mask = (1 << n_aliases) - 1;
-
- do {
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
- p &= PMD_MASK;
- p += PMD_SIZE;
- pmd++;
-
- continue;
- }
-
- pte = pte_offset_kernel(pmd, p);
-
- do {
- unsigned long phys;
- pte_t entry = *pte;
-
- if (!(pte_val(entry) & _PAGE_PRESENT)) {
- pte++;
- p += PAGE_SIZE;
- continue;
- }
-
- phys = pte_val(entry) & PTE_PHYS_MASK;
-
- if ((p ^ phys) & alias_mask) {
- d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
- d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
-
- if (d == all_aliases_mask)
- goto loop_exit;
- }
-
- pte++;
- p += PAGE_SIZE;
- } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
- pmd++;
- } while (p < end);
-
-loop_exit:
- addr_offset = 0;
- select_bit = 1;
-
- for (i = 0; i < n_aliases; i++) {
- if (d & select_bit) {
- (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
- wmb();
- }
-
- select_bit <<= 1;
- addr_offset += PAGE_SIZE;
- }
-}
-
/*
* Note : (RPC) since the caches are physically tagged, the only point
* of flush_cache_mm for SH-4 is to get rid of aliases from the
* D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
* lines can stay resident so long as the virtual address they were
* accessed with (hence cache set) is in accord with the physical
- * address (i.e. tag). It's no different here. So I reckon we don't
- * need to flush the I-cache, since aliases don't matter for that. We
- * should try that.
+ * address (i.e. tag). It's no different here.
*
* Caller takes mm->mmap_sem.
*/
@@ -264,33 +198,7 @@ static void sh4_flush_cache_mm(void *arg)
if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
return;
- /*
- * If cache is only 4k-per-way, there are never any 'aliases'. Since
- * the cache is physically tagged, the data can just be left in there.
- */
- if (boot_cpu_data.dcache.n_aliases == 0)
- return;
-
- /*
- * Don't bother groveling around the dcache for the VMA ranges
- * if there are too many PTEs to make it worthwhile.
- */
- if (mm->nr_ptes >= MAX_DCACHE_PAGES)
- flush_dcache_all();
- else {
- struct vm_area_struct *vma;
-
- /*
- * In this case there are reasonably sized ranges to flush,
- * iterate through the VMA list and take care of any aliases.
- */
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- __flush_cache_mm(mm, vma->vm_start, vma->vm_end);
- }
-
- /* Only touch the icache if one of the VMAs has VM_EXEC set. */
- if (mm->exec_vm)
- flush_icache_all();
+ flush_dcache_all();
}
/*
@@ -303,44 +211,63 @@ static void sh4_flush_cache_page(void *args)
{
struct flusher_data *data = args;
struct vm_area_struct *vma;
+ struct page *page;
unsigned long address, pfn, phys;
- unsigned int alias_mask;
+ int map_coherent = 0;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ void *vaddr;
vma = data->vma;
- address = data->addr1;
+ address = data->addr1 & PAGE_MASK;
pfn = data->addr2;
phys = pfn << PAGE_SHIFT;
+ page = pfn_to_page(pfn);
if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
return;
- alias_mask = boot_cpu_data.dcache.alias_mask;
-
- /* We only need to flush D-cache when we have alias */
- if ((address^phys) & alias_mask) {
- /* Loop 4K of the D-cache */
- flush_cache_one(
- CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
- phys);
- /* Loop another 4K of the D-cache */
- flush_cache_one(
- CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
- phys);
- }
+ pgd = pgd_offset(vma->vm_mm, address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+
+ /* If the page isn't present, there is nothing to do here. */
+ if (!(pte_val(*pte) & _PAGE_PRESENT))
+ return;
- alias_mask = boot_cpu_data.icache.alias_mask;
- if (vma->vm_flags & VM_EXEC) {
+ if ((vma->vm_mm == current->active_mm))
+ vaddr = NULL;
+ else {
/*
- * Evict entries from the portion of the cache from which code
- * may have been executed at this address (virtual). There's
- * no need to evict from the portion corresponding to the
- * physical address as for the D-cache, because we know the
- * kernel has never executed the code through its identity
- * translation.
+ * Use kmap_coherent or kmap_atomic to do flushes for
+	 * an ASID other than the current one.
*/
- flush_cache_one(
- CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
- phys);
+ map_coherent = (current_cpu_data.dcache.n_aliases &&
+ !test_bit(PG_dcache_dirty, &page->flags) &&
+ page_mapped(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, address);
+ else
+ vaddr = kmap_atomic(page, KM_USER0);
+
+ address = (unsigned long)vaddr;
+ }
+
+ if (pages_do_alias(address, phys))
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ (address & shm_align_mask), phys);
+
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_all();
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent(vaddr);
+ else
+ kunmap_atomic(vaddr, KM_USER0);
}
}
@@ -373,24 +300,10 @@ static void sh4_flush_cache_range(void *args)
if (boot_cpu_data.dcache.n_aliases == 0)
return;
- /*
- * Don't bother with the lookup and alias check if we have a
- * wide range to cover, just blow away the dcache in its
- * entirety instead. -- PFM.
- */
- if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
- flush_dcache_all();
- else
- __flush_cache_mm(vma->vm_mm, start, end);
+ flush_dcache_all();
- if (vma->vm_flags & VM_EXEC) {
- /*
- * TODO: Is this required??? Need to look at how I-cache
- * coherency is assured when new programs are loaded to see if
- * this matters.
- */
+ if (vma->vm_flags & VM_EXEC)
flush_icache_all();
- }
}
/**
@@ -464,245 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
} while (--way_count != 0);
}
-/*
- * Break the 1, 2 and 4 way variants of this out into separate functions to
- * avoid nearly all the overhead of having the conditional stuff in the function
- * bodies (+ the 1 and 2 way cases avoid saving any registers too).
- *
- * We want to eliminate unnecessary bus transactions, so this code uses
- * a non-obvious technique.
- *
- * Loop over a cache way sized block of, one cache line at a time. For each
- * line, use movca.a to cause the current cache line contents to be written
- * back, but without reading anything from main memory. However this has the
- * side effect that the cache is now caching that memory location. So follow
- * this with a cache invalidate to mark the cache line invalid. And do all
- * this with interrupts disabled, to avoid the cache line being accidently
- * evicted while it is holding garbage.
- *
- * This also breaks in a number of circumstances:
- * - if there are modifications to the region of memory just above
- * empty_zero_page (for example because a breakpoint has been placed
- * there), then these can be lost.
- *
- * This is because the the memory address which the cache temporarily
- * caches in the above description is empty_zero_page. So the
- * movca.l hits the cache (it is assumed that it misses, or at least
- * isn't dirty), modifies the line and then invalidates it, losing the
- * required change.
- *
- * - If caches are disabled or configured in write-through mode, then
- * the movca.l writes garbage directly into memory.
- */
-static void __flush_dcache_segment_writethrough(unsigned long start,
- unsigned long extent_per_way)
-{
- unsigned long addr;
- int i;
-
- addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
-
- while (extent_per_way) {
- for (i = 0; i < cpu_data->dcache.ways; i++)
- __raw_writel(0, addr + cpu_data->dcache.way_incr * i);
-
- addr += cpu_data->dcache.linesz;
- extent_per_way -= cpu_data->dcache.linesz;
- }
-}
-
-static void __flush_dcache_segment_1way(unsigned long start,
- unsigned long extent_per_way)
-{
- unsigned long orig_sr, sr_with_bl;
- unsigned long base_addr;
- unsigned long way_incr, linesz, way_size;
- struct cache_info *dcache;
- register unsigned long a0, a0e;
-
- asm volatile("stc sr, %0" : "=r" (orig_sr));
- sr_with_bl = orig_sr | (1<<28);
- base_addr = ((unsigned long)&empty_zero_page[0]);
-
- /*
- * The previous code aligned base_addr to 16k, i.e. the way_size of all
- * existing SH-4 D-caches. Whilst I don't see a need to have this
- * aligned to any better than the cache line size (which it will be
- * anyway by construction), let's align it to at least the way_size of
- * any existing or conceivable SH-4 D-cache. -- RPC
- */
- base_addr = ((base_addr >> 16) << 16);
- base_addr |= start;
-
- dcache = &boot_cpu_data.dcache;
- linesz = dcache->linesz;
- way_incr = dcache->way_incr;
- way_size = dcache->way_size;
-
- a0 = base_addr;
- a0e = base_addr + extent_per_way;
- do {
- asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
- asm volatile("movca.l r0, @%0\n\t"
- "ocbi @%0" : : "r" (a0));
- a0 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "ocbi @%0" : : "r" (a0));
- a0 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "ocbi @%0" : : "r" (a0));
- a0 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "ocbi @%0" : : "r" (a0));
- asm volatile("ldc %0, sr" : : "r" (orig_sr));
- a0 += linesz;
- } while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_2way(unsigned long start,
- unsigned long extent_per_way)
-{
- unsigned long orig_sr, sr_with_bl;
- unsigned long base_addr;
- unsigned long way_incr, linesz, way_size;
- struct cache_info *dcache;
- register unsigned long a0, a1, a0e;
-
- asm volatile("stc sr, %0" : "=r" (orig_sr));
- sr_with_bl = orig_sr | (1<<28);
- base_addr = ((unsigned long)&empty_zero_page[0]);
-
- /* See comment under 1-way above */
- base_addr = ((base_addr >> 16) << 16);
- base_addr |= start;
-
- dcache = &boot_cpu_data.dcache;
- linesz = dcache->linesz;
- way_incr = dcache->way_incr;
- way_size = dcache->way_size;
-
- a0 = base_addr;
- a1 = a0 + way_incr;
- a0e = base_addr + extent_per_way;
- do {
- asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1" : :
- "r" (a0), "r" (a1));
- a0 += linesz;
- a1 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1" : :
- "r" (a0), "r" (a1));
- a0 += linesz;
- a1 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1" : :
- "r" (a0), "r" (a1));
- a0 += linesz;
- a1 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1" : :
- "r" (a0), "r" (a1));
- asm volatile("ldc %0, sr" : : "r" (orig_sr));
- a0 += linesz;
- a1 += linesz;
- } while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_4way(unsigned long start,
- unsigned long extent_per_way)
-{
- unsigned long orig_sr, sr_with_bl;
- unsigned long base_addr;
- unsigned long way_incr, linesz, way_size;
- struct cache_info *dcache;
- register unsigned long a0, a1, a2, a3, a0e;
-
- asm volatile("stc sr, %0" : "=r" (orig_sr));
- sr_with_bl = orig_sr | (1<<28);
- base_addr = ((unsigned long)&empty_zero_page[0]);
-
- /* See comment under 1-way above */
- base_addr = ((base_addr >> 16) << 16);
- base_addr |= start;
-
- dcache = &boot_cpu_data.dcache;
- linesz = dcache->linesz;
- way_incr = dcache->way_incr;
- way_size = dcache->way_size;
-
- a0 = base_addr;
- a1 = a0 + way_incr;
- a2 = a1 + way_incr;
- a3 = a2 + way_incr;
- a0e = base_addr + extent_per_way;
- do {
- asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "movca.l r0, @%2\n\t"
- "movca.l r0, @%3\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1\n\t"
- "ocbi @%2\n\t"
- "ocbi @%3\n\t" : :
- "r" (a0), "r" (a1), "r" (a2), "r" (a3));
- a0 += linesz;
- a1 += linesz;
- a2 += linesz;
- a3 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "movca.l r0, @%2\n\t"
- "movca.l r0, @%3\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1\n\t"
- "ocbi @%2\n\t"
- "ocbi @%3\n\t" : :
- "r" (a0), "r" (a1), "r" (a2), "r" (a3));
- a0 += linesz;
- a1 += linesz;
- a2 += linesz;
- a3 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "movca.l r0, @%2\n\t"
- "movca.l r0, @%3\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1\n\t"
- "ocbi @%2\n\t"
- "ocbi @%3\n\t" : :
- "r" (a0), "r" (a1), "r" (a2), "r" (a3));
- a0 += linesz;
- a1 += linesz;
- a2 += linesz;
- a3 += linesz;
- asm volatile("movca.l r0, @%0\n\t"
- "movca.l r0, @%1\n\t"
- "movca.l r0, @%2\n\t"
- "movca.l r0, @%3\n\t"
- "ocbi @%0\n\t"
- "ocbi @%1\n\t"
- "ocbi @%2\n\t"
- "ocbi @%3\n\t" : :
- "r" (a0), "r" (a1), "r" (a2), "r" (a3));
- asm volatile("ldc %0, sr" : : "r" (orig_sr));
- a0 += linesz;
- a1 += linesz;
- a2 += linesz;
- a3 += linesz;
- } while (a0 < a0e);
-}
-
extern void __weak sh4__flush_region_init(void);
/*
@@ -710,32 +384,11 @@ extern void __weak sh4__flush_region_init(void);
*/
void __init sh4_cache_init(void)
{
- unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
-
printk("PVR=%08x CVR=%08x PRR=%08x\n",
ctrl_inl(CCN_PVR),
ctrl_inl(CCN_CVR),
ctrl_inl(CCN_PRR));
- if (wt_enabled)
- __flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
- else {
- switch (boot_cpu_data.dcache.ways) {
- case 1:
- __flush_dcache_segment_fn = __flush_dcache_segment_1way;
- break;
- case 2:
- __flush_dcache_segment_fn = __flush_dcache_segment_2way;
- break;
- case 4:
- __flush_dcache_segment_fn = __flush_dcache_segment_4way;
- break;
- default:
- panic("unknown number of cache ways\n");
- break;
- }
- }
-
local_flush_icache_range = sh4_flush_icache_range;
local_flush_dcache_page = sh4_flush_dcache_page;
local_flush_cache_all = sh4_flush_cache_all;
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f..eb4cc4ec795 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -563,7 +563,7 @@ static void sh5_flush_cache_page(void *args)
static void sh5_flush_dcache_page(void *page)
{
- sh64_dcache_purge_phy_page(page_to_phys(page));
+ sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
wmb();
}
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2601935eb58..f527fb70fce 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -141,7 +141,7 @@ static void sh7705_flush_dcache_page(void *arg)
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
- __flush_dcache_page(PHYSADDR(page_address(page)));
+ __flush_dcache_page(__pa(page_address(page)));
}
static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a2dc7f9ecc5..e9415d3ea94 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -27,8 +27,11 @@ void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
void (*__flush_wback_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_invalidate_region);
static inline void noop__flush_region(void *start, int size)
{
@@ -161,14 +164,21 @@ void flush_cache_all(void)
{
cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
+EXPORT_SYMBOL(flush_cache_all);
void flush_cache_mm(struct mm_struct *mm)
{
+ if (boot_cpu_data.dcache.n_aliases == 0)
+ return;
+
cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}
void flush_cache_dup_mm(struct mm_struct *mm)
{
+ if (boot_cpu_data.dcache.n_aliases == 0)
+ return;
+
cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}
@@ -195,11 +205,13 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
+EXPORT_SYMBOL(flush_cache_range);
void flush_dcache_page(struct page *page)
{
cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
+EXPORT_SYMBOL(flush_dcache_page);
void flush_icache_range(unsigned long start, unsigned long end)
{
@@ -265,7 +277,11 @@ static void __init emit_cache_params(void)
void __init cpu_cache_init(void)
{
- unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+ unsigned int cache_disabled = 0;
+
+#ifdef CCR
+ cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#endif
compute_alias(&boot_cpu_data.icache);
compute_alias(&boot_cpu_data.dcache);
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index e098ec158dd..902967e3f84 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -15,11 +15,15 @@
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
static int __init dma_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
@@ -27,15 +31,12 @@ static int __init dma_init(void)
}
fs_initcall(dma_init);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret, *ret_nocache;
int order = get_order(size);
- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
- return ret;
-
ret = (void *)__get_free_pages(gfp, order);
if (!ret)
return NULL;
@@ -57,35 +58,26 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = virt_to_phys(ret);
- debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
-
return ret_nocache;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
int order = get_order(size);
unsigned long pfn = dma_handle >> PAGE_SHIFT;
int k;
- WARN_ON(irqs_disabled()); /* for portability */
-
- if (dma_release_from_coherent(dev, order, vaddr))
- return;
-
- debug_dma_free_coherent(dev, size, vaddr, dma_handle);
for (k = 0; k < (1 << order); k++)
__free_pages(pfn_to_page(pfn + k), 0);
+
iounmap(vaddr);
}
-EXPORT_SYMBOL(dma_free_coherent);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
-#ifdef CONFIG_CPU_SH5
+#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
void *p1addr = vaddr;
#else
void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8173e38afd3..432acd07e76 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -15,6 +15,7 @@
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
@@ -186,11 +187,21 @@ void __init paging_init(void)
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}
+/*
+ * Early initialization for any I/O MMUs we might have.
+ */
+static void __init iommu_init(void)
+{
+ no_iommu_init();
+}
+
void __init mem_init(void)
{
int codesize, datasize, initsize;
int nid;
+ iommu_init();
+
num_physpages = 0;
high_memory = NULL;
@@ -323,4 +334,12 @@ int memory_add_physaddr_to_nid(u64 addr)
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
+
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_PMB
+int __in_29bit_mode(void)
+{
+ return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
+}
+#endif /* CONFIG_PMB */
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 16e01b5fed0..15d74ea4209 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr)
pagefault_disable();
idx = FIX_CMAP_END -
- ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
+ (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
+ (FIX_N_COLOURS * smp_processor_id()));
+
vaddr = __fix_to_virt(idx);
BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 9b784fdb947..6c524446c0f 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -60,7 +60,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
unsigned long bootmem_paddr;
/* Don't allow bogus node assignment */
- BUG_ON(nid > MAX_NUMNODES || nid == 0);
+ BUG_ON(nid > MAX_NUMNODES || nid <= 0);
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c
deleted file mode 100644
index 43c8eac4d8a..00000000000
--- a/arch/sh/mm/pmb-fixed.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * arch/sh/mm/fixed_pmb.c
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-
-static int __uses_jump_to_uncached fixed_pmb_init(void)
-{
- int i;
- unsigned long addr, data;
-
- jump_to_uncached();
-
- for (i = 0; i < PMB_ENTRY_MAX; i++) {
- addr = PMB_DATA + (i << PMB_E_SHIFT);
- data = ctrl_inl(addr);
- if (!(data & PMB_V))
- continue;
-
- if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
- data &= ~PMB_WT;
-#else
- data &= ~(PMB_C | PMB_WT);
-#endif
- }
- ctrl_outl(data, addr);
- }
-
- back_to_cached();
-
- return 0;
-}
-arch_initcall(fixed_pmb_init);
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index aade3110211..280f6a16603 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -35,29 +35,9 @@
static void __pmb_unmap(struct pmb_entry *);
-static struct kmem_cache *pmb_cache;
+static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;
-static struct pmb_entry pmb_init_map[] = {
- /* vpn ppn flags (ub/sz/c/wt) */
-
- /* P1 Section Mappings */
- { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, },
- { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, },
- { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
- { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, },
- { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, },
- { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, },
-
- /* P2 Section Mappings */
- { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
- { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-};
-
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -73,81 +53,68 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
+static int pmb_alloc_entry(void)
{
- struct pmb_entry **p, *tmp;
+ unsigned int pos;
- p = &pmb_list;
- while ((tmp = *p) != NULL)
- p = &tmp->next;
+repeat:
+ pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
- pmbe->next = tmp;
- *p = pmbe;
-}
+ if (unlikely(pos > NR_PMB_ENTRIES))
+ return -ENOSPC;
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
- struct pmb_entry **p, *tmp;
+ if (test_and_set_bit(pos, &pmb_map))
+ goto repeat;
- for (p = &pmb_list; (tmp = *p); p = &tmp->next)
- if (tmp == pmbe) {
- *p = tmp->next;
- return;
- }
+ return pos;
}
-struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
- unsigned long flags)
+static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
+ unsigned long flags, int entry)
{
struct pmb_entry *pmbe;
+ int pos;
+
+ if (entry == PMB_NO_ENTRY) {
+ pos = pmb_alloc_entry();
+ if (pos < 0)
+ return ERR_PTR(pos);
+ } else {
+ if (test_bit(entry, &pmb_map))
+ return ERR_PTR(-ENOSPC);
+ pos = entry;
+ }
- pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
+ pmbe = &pmb_entry_list[pos];
if (!pmbe)
return ERR_PTR(-ENOMEM);
pmbe->vpn = vpn;
pmbe->ppn = ppn;
pmbe->flags = flags;
-
- spin_lock_irq(&pmb_list_lock);
- pmb_list_add(pmbe);
- spin_unlock_irq(&pmb_list_lock);
+ pmbe->entry = pos;
return pmbe;
}
-void pmb_free(struct pmb_entry *pmbe)
+static void pmb_free(struct pmb_entry *pmbe)
{
- spin_lock_irq(&pmb_list_lock);
- pmb_list_del(pmbe);
- spin_unlock_irq(&pmb_list_lock);
+ int pos = pmbe->entry;
- kmem_cache_free(pmb_cache, pmbe);
+ pmbe->vpn = 0;
+ pmbe->ppn = 0;
+ pmbe->flags = 0;
+ pmbe->entry = 0;
+
+ clear_bit(pos, &pmb_map);
}
/*
* Must be in P2 for __set_pmb_entry()
*/
-int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
- unsigned long flags, int *entry)
+static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
+ unsigned long flags, int pos)
{
- unsigned int pos = *entry;
-
- if (unlikely(pos == PMB_NO_ENTRY))
- pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-repeat:
- if (unlikely(pos > NR_PMB_ENTRIES))
- return -ENOSPC;
-
- if (test_and_set_bit(pos, &pmb_map)) {
- pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
- goto repeat;
- }
-
ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
#ifdef CONFIG_CACHE_WRITETHROUGH
@@ -161,35 +128,21 @@ repeat:
#endif
ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
-
- *entry = pos;
-
- return 0;
}
-int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
- int ret;
-
jump_to_uncached();
- ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+ __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
back_to_cached();
-
- return ret;
}
-void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
unsigned int entry = pmbe->entry;
unsigned long addr;
- /*
- * Don't allow clearing of wired init entries, P1 or P2 access
- * without a corresponding mapping in the PMB will lead to reset
- * by the TLB.
- */
- if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
- entry >= NR_PMB_ENTRIES))
+ if (unlikely(entry >= NR_PMB_ENTRIES))
return;
jump_to_uncached();
@@ -202,8 +155,6 @@ void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
back_to_cached();
-
- clear_bit(entry, &pmb_map);
}
@@ -239,23 +190,17 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
again:
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
- int ret;
-
if (size < pmb_sizes[i].size)
continue;
- pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
+ pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
+ PMB_NO_ENTRY);
if (IS_ERR(pmbe)) {
err = PTR_ERR(pmbe);
goto out;
}
- ret = set_pmb_entry(pmbe);
- if (ret != 0) {
- pmb_free(pmbe);
- err = -EBUSY;
- goto out;
- }
+ set_pmb_entry(pmbe);
phys += pmb_sizes[i].size;
vaddr += pmb_sizes[i].size;
@@ -292,11 +237,16 @@ out:
void pmb_unmap(unsigned long addr)
{
- struct pmb_entry **p, *pmbe;
+ struct pmb_entry *pmbe = NULL;
+ int i;
- for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
- if (pmbe->vpn == addr)
- break;
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ if (test_bit(i, &pmb_map)) {
+ pmbe = &pmb_entry_list[i];
+ if (pmbe->vpn == addr)
+ break;
+ }
+ }
if (unlikely(!pmbe))
return;
@@ -306,13 +256,22 @@ void pmb_unmap(unsigned long addr)
static void __pmb_unmap(struct pmb_entry *pmbe)
{
- WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+ BUG_ON(!test_bit(pmbe->entry, &pmb_map));
do {
struct pmb_entry *pmblink = pmbe;
- if (pmbe->entry != PMB_NO_ENTRY)
- clear_pmb_entry(pmbe);
+ /*
+ * We may be called before this pmb_entry has been
+ * entered into the PMB table via set_pmb_entry(), but
+ * that's OK because we've allocated a unique slot for
+ * this entry in pmb_alloc() (even if we haven't filled
+ * it yet).
+ *
+ * Therefore, calling clear_pmb_entry() is safe as no
+ * other mapping can be using that slot.
+ */
+ clear_pmb_entry(pmbe);
pmbe = pmblink->link;
@@ -320,42 +279,34 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
} while (pmbe);
}
-static void pmb_cache_ctor(void *pmb)
+#ifdef CONFIG_PMB
+int __uses_jump_to_uncached pmb_init(void)
{
- struct pmb_entry *pmbe = pmb;
-
- memset(pmb, 0, sizeof(struct pmb_entry));
-
- pmbe->entry = PMB_NO_ENTRY;
-}
-
-static int __uses_jump_to_uncached pmb_init(void)
-{
- unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
- unsigned int entry, i;
-
- BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
-
- pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
- SLAB_PANIC, pmb_cache_ctor);
+ unsigned int i;
+ long size, ret;
jump_to_uncached();
/*
- * Ordering is important, P2 must be mapped in the PMB before we
- * can set PMB.SE, and P1 must be mapped before we jump back to
- * P1 space.
+ * Insert PMB entries for the P1 and P2 areas so that, after
+ * we've switched the MMU to 32-bit mode, the semantics of P1
+ * and P2 are the same as in 29-bit mode, e.g.
+ *
+ * P1 - provides a cached window onto physical memory
+ * P2 - provides an uncached window onto physical memory
*/
- for (entry = 0; entry < nr_entries; entry++) {
- struct pmb_entry *pmbe = pmb_init_map + entry;
+ size = __MEMORY_START + __MEMORY_SIZE;
- __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
- }
+ ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+ BUG_ON(ret != size);
+
+ ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+ BUG_ON(ret != size);
ctrl_outl(0, PMB_IRMCR);
/* PMB.SE and UB[7] */
- ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+ ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
/* Flush out the TLB */
i = ctrl_inl(MMUCR);
@@ -366,7 +317,53 @@ static int __uses_jump_to_uncached pmb_init(void)
return 0;
}
-arch_initcall(pmb_init);
+#else
+int __uses_jump_to_uncached pmb_init(void)
+{
+ int i;
+ unsigned long addr, data;
+
+ jump_to_uncached();
+
+ for (i = 0; i < PMB_ENTRY_MAX; i++) {
+ struct pmb_entry *pmbe;
+ unsigned long vpn, ppn, flags;
+
+ addr = PMB_DATA + (i << PMB_E_SHIFT);
+ data = ctrl_inl(addr);
+ if (!(data & PMB_V))
+ continue;
+
+ if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+ data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ data &= ~PMB_WT;
+#else
+ data &= ~(PMB_C | PMB_WT);
+#endif
+ }
+ ctrl_outl(data, addr);
+
+ ppn = data & PMB_PFN_MASK;
+
+ flags = data & (PMB_C | PMB_WT | PMB_UB);
+ flags |= data & PMB_SZ_MASK;
+
+ addr = PMB_ADDR + (i << PMB_E_SHIFT);
+ data = ctrl_inl(addr);
+
+ vpn = data & PMB_PFN_MASK;
+
+ pmbe = pmb_alloc(vpn, ppn, flags, i);
+ WARN_ON(IS_ERR(pmbe));
+ }
+
+ back_to_cached();
+
+ return 0;
+}
+#endif /* CONFIG_PMB */
static int pmb_seq_show(struct seq_file *file, void *iter)
{
@@ -434,15 +431,18 @@ postcore_initcall(pmb_debugfs_init);
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
static pm_message_t prev_state;
+ int i;
/* Restore the PMB after a resume from hibernation */
if (state.event == PM_EVENT_ON &&
prev_state.event == PM_EVENT_FREEZE) {
struct pmb_entry *pmbe;
- spin_lock_irq(&pmb_list_lock);
- for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
- set_pmb_entry(pmbe);
- spin_unlock_irq(&pmb_list_lock);
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ if (test_bit(i, &pmb_map)) {
+ pmbe = &pmb_entry_list[i];
+ set_pmb_entry(pmbe);
+ }
+ }
}
prev_state = state;
return 0;
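The pmb.c rework above drops the kmem_cache-backed linked list in favour of a fixed pmb_entry_list[] whose slots are tracked in the pmb_map bitmap. A minimal stand-alone sketch of that allocation pattern, with simplified single-threaded stand-ins for the kernel's find_first_zero_bit() and test_and_set_bit() (not the real bitops), could look like this:

#include <errno.h>

#define NR_SLOTS	16

static unsigned long slot_map;

/* simplified stand-in for find_first_zero_bit() */
static int find_first_zero(unsigned long map, int max)
{
	int i;

	for (i = 0; i < max; i++)
		if (!(map & (1UL << i)))
			return i;
	return max;
}

/* simplified stand-in for test_and_set_bit(); returns the old bit value */
static int test_and_set(int bit, unsigned long *map)
{
	int was_set = !!(*map & (1UL << bit));

	*map |= 1UL << bit;
	return was_set;
}

/*
 * Same shape as the new pmb_alloc_entry(): scan for a free slot and
 * claim it, retrying if someone else won the race for that slot.
 */
static int alloc_slot(void)
{
	int pos;

repeat:
	pos = find_first_zero(slot_map, NR_SLOTS);
	if (pos >= NR_SLOTS)
		return -ENOSPC;

	if (test_and_set(pos, &slot_map))
		goto repeat;

	return pos;
}

Freeing an entry then reduces to clearing the bit again, which is what the new pmb_free() does after zeroing the pmb_entry fields.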
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index 8e6eec91c14..4886c5c1786 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -7,7 +7,3 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-
-oprofile-$(CONFIG_CPU_SUBTYPE_SH7750S) += op_model_sh7750.o
-oprofile-$(CONFIG_CPU_SUBTYPE_SH7750) += op_model_sh7750.o
-oprofile-$(CONFIG_CPU_SUBTYPE_SH7091) += op_model_sh7750.o
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index 44f4e31c6d6..ac604937f3e 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -20,9 +20,6 @@
#include <asm/processor.h>
#include "op_impl.h"
-extern struct op_sh_model op_model_sh7750_ops __weak;
-extern struct op_sh_model op_model_sh4a_ops __weak;
-
static struct op_sh_model *model;
static struct op_counter_config ctr[20];
@@ -94,33 +91,14 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
*/
ops->backtrace = sh_backtrace;
- switch (current_cpu_data.type) {
- /* SH-4 types */
- case CPU_SH7750:
- case CPU_SH7750S:
- lmodel = &op_model_sh7750_ops;
- break;
-
- /* SH-4A types */
- case CPU_SH7763:
- case CPU_SH7770:
- case CPU_SH7780:
- case CPU_SH7781:
- case CPU_SH7785:
- case CPU_SH7786:
- case CPU_SH7723:
- case CPU_SH7724:
- case CPU_SHX3:
- lmodel = &op_model_sh4a_ops;
- break;
-
- /* SH4AL-DSP types */
- case CPU_SH7343:
- case CPU_SH7722:
- case CPU_SH7366:
- lmodel = &op_model_sh4a_ops;
- break;
- }
+ /*
+ * XXX
+ *
+ * All of the SH7750/SH-4A counters have been converted to perf,
+ * this infrastructure hook is left for other users until they've
+ * had a chance to convert over, at which point all of this
+ * will be deleted.
+ */
if (!lmodel)
return -ENODEV;
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h
index 4d509975eba..1244479ceb2 100644
--- a/arch/sh/oprofile/op_impl.h
+++ b/arch/sh/oprofile/op_impl.h
@@ -6,7 +6,7 @@ struct op_counter_config {
unsigned long enabled;
unsigned long event;
- unsigned long long count;
+ unsigned long count;
/* Dummy values for userspace tool compliance */
unsigned long kernel;
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c
deleted file mode 100644
index c892c7c30c2..00000000000
--- a/arch/sh/oprofile/op_model_sh7750.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * arch/sh/oprofile/op_model_sh7750.c
- *
- * OProfile support for SH7750/SH7750S Performance Counters
- *
- * Copyright (C) 2003 - 2008 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/profile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/fs.h>
-#include "op_impl.h"
-
-#define PM_CR_BASE 0xff000084 /* 16-bit */
-#define PM_CTR_BASE 0xff100004 /* 32-bit */
-
-#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
-#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
-#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
-
-#define PMCR_PMM_MASK 0x0000003f
-
-#define PMCR_CLKF 0x00000100
-#define PMCR_PMCLR 0x00002000
-#define PMCR_PMST 0x00004000
-#define PMCR_PMEN 0x00008000
-
-struct op_sh_model op_model_sh7750_ops;
-
-#define NR_CNTRS 2
-
-static struct sh7750_ppc_register_config {
- unsigned int ctrl;
- unsigned long cnt_hi;
- unsigned long cnt_lo;
-} regcache[NR_CNTRS];
-
-/*
- * There are a number of events supported by each counter (33 in total).
- * Since we have 2 counters, each counter will take the event code as it
- * corresponds to the PMCR PMM setting. Each counter can be configured
- * independently.
- *
- * Event Code Description
- * ---------- -----------
- *
- * 0x01 Operand read access
- * 0x02 Operand write access
- * 0x03 UTLB miss
- * 0x04 Operand cache read miss
- * 0x05 Operand cache write miss
- * 0x06 Instruction fetch (w/ cache)
- * 0x07 Instruction TLB miss
- * 0x08 Instruction cache miss
- * 0x09 All operand accesses
- * 0x0a All instruction accesses
- * 0x0b OC RAM operand access
- * 0x0d On-chip I/O space access
- * 0x0e Operand access (r/w)
- * 0x0f Operand cache miss (r/w)
- * 0x10 Branch instruction
- * 0x11 Branch taken
- * 0x12 BSR/BSRF/JSR
- * 0x13 Instruction execution
- * 0x14 Instruction execution in parallel
- * 0x15 FPU Instruction execution
- * 0x16 Interrupt
- * 0x17 NMI
- * 0x18 trapa instruction execution
- * 0x19 UBCA match
- * 0x1a UBCB match
- * 0x21 Instruction cache fill
- * 0x22 Operand cache fill
- * 0x23 Elapsed time
- * 0x24 Pipeline freeze by I-cache miss
- * 0x25 Pipeline freeze by D-cache miss
- * 0x27 Pipeline freeze by branch instruction
- * 0x28 Pipeline freeze by CPU register
- * 0x29 Pipeline freeze by FPU
- *
- * Unfortunately we don't have a native exception or interrupt for counter
- * overflow (although since these counters can run for 16.3 days without
- * overflowing, it's not really necessary).
- *
- * OProfile on the other hand likes to have samples taken periodically, so
- * for now we just piggyback the timer interrupt to get the expected
- * behavior.
- */
-
-static int sh7750_timer_notify(struct pt_regs *regs)
-{
- oprofile_add_sample(regs, 0);
- return 0;
-}
-
-static u64 sh7750_read_counter(int counter)
-{
- return (u64)((u64)(__raw_readl(PMCTRH(counter)) & 0xffff) << 32) |
- __raw_readl(PMCTRL(counter));
-}
-
-/*
- * Files will be in a path like:
- *
- * /<oprofilefs mount point>/<counter number>/<file>
- *
- * So when dealing with <file>, we look to the parent dentry for the counter
- * number.
- */
-static inline int to_counter(struct file *file)
-{
- const unsigned char *name = file->f_path.dentry->d_parent->d_name.name;
-
- return (int)simple_strtol(name, NULL, 10);
-}
-
-/*
- * XXX: We have 48-bit counters, so we're probably going to want something
- * more along the lines of oprofilefs_ullong_to_user().. Truncating to
- * unsigned long works fine for now though, as long as we don't attempt to
- * profile for too horribly long.
- */
-static ssize_t sh7750_read_count(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int counter = to_counter(file);
- u64 val = sh7750_read_counter(counter);
-
- return oprofilefs_ulong_to_user((unsigned long)val, buf, count, ppos);
-}
-
-static ssize_t sh7750_write_count(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- int counter = to_counter(file);
- unsigned long val;
-
- if (oprofilefs_ulong_from_user(&val, buf, count))
- return -EFAULT;
-
- /*
- * Any write will clear the counter, although only 0 should be
- * written for this purpose, as we do not support setting the
- * counter to an arbitrary value.
- */
- WARN_ON(val != 0);
-
- __raw_writew(__raw_readw(PMCR(counter)) | PMCR_PMCLR, PMCR(counter));
-
- return count;
-}
-
-static const struct file_operations count_fops = {
- .read = sh7750_read_count,
- .write = sh7750_write_count,
-};
-
-static int sh7750_ppc_create_files(struct super_block *sb, struct dentry *dir)
-{
- return oprofilefs_create_file(sb, dir, "count", &count_fops);
-}
-
-static void sh7750_ppc_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int counters = op_model_sh7750_ops.num_counters;
- int i;
-
- for (i = 0; i < counters; i++) {
- regcache[i].ctrl = 0;
- regcache[i].cnt_hi = 0;
- regcache[i].cnt_lo = 0;
-
- if (!ctr[i].enabled)
- continue;
-
- regcache[i].ctrl |= ctr[i].event | PMCR_PMEN | PMCR_PMST;
- regcache[i].cnt_hi = (unsigned long)((ctr->count >> 32) & 0xffff);
- regcache[i].cnt_lo = (unsigned long)(ctr->count & 0xffffffff);
- }
-}
-
-static void sh7750_ppc_cpu_setup(void *args)
-{
- unsigned int counters = op_model_sh7750_ops.num_counters;
- int i;
-
- for (i = 0; i < counters; i++) {
- __raw_writew(0, PMCR(i));
- __raw_writel(regcache[i].cnt_hi, PMCTRH(i));
- __raw_writel(regcache[i].cnt_lo, PMCTRL(i));
- }
-}
-
-static void sh7750_ppc_cpu_start(void *args)
-{
- unsigned int counters = op_model_sh7750_ops.num_counters;
- int i;
-
- for (i = 0; i < counters; i++)
- __raw_writew(regcache[i].ctrl, PMCR(i));
-}
-
-static void sh7750_ppc_cpu_stop(void *args)
-{
- unsigned int counters = op_model_sh7750_ops.num_counters;
- int i;
-
- /* Disable the counters */
- for (i = 0; i < counters; i++)
- __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
-}
-
-static inline void sh7750_ppc_reset(void)
-{
- unsigned int counters = op_model_sh7750_ops.num_counters;
- int i;
-
- /* Clear the counters */
- for (i = 0; i < counters; i++)
- __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMCLR, PMCR(i));
-}
-
-static int sh7750_ppc_init(void)
-{
- sh7750_ppc_reset();
-
- return register_timer_hook(sh7750_timer_notify);
-}
-
-static void sh7750_ppc_exit(void)
-{
- unregister_timer_hook(sh7750_timer_notify);
-
- sh7750_ppc_reset();
-}
-
-struct op_sh_model op_model_sh7750_ops = {
- .cpu_type = "sh/sh7750",
- .num_counters = NR_CNTRS,
- .reg_setup = sh7750_ppc_reg_setup,
- .cpu_setup = sh7750_ppc_cpu_setup,
- .cpu_start = sh7750_ppc_cpu_start,
- .cpu_stop = sh7750_ppc_cpu_stop,
- .init = sh7750_ppc_init,
- .exit = sh7750_ppc_exit,
- .create_files = sh7750_ppc_create_files,
-};
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index da1218e8ee8..63f73ae8a89 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -847,7 +847,7 @@ void __init time_init(void)
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
- printk("clockevent: mult[%lx] shift[%d]\n",
+	printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
setup_sparc64_timer();
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index eb240323c40..d22f9e5c0ea 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -16,7 +16,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/smp_lock.h>
+
#include <asm/uaccess.h>
#include "mem_user.h"
@@ -78,7 +78,6 @@ out:
static int mmapper_open(struct inode *inode, struct file *file)
{
- cycle_kernel_lock();
return 0;
}
@@ -115,18 +114,16 @@ static int __init mmapper_init(void)
v_buf = (char *) find_iomem("mmapper", &mmapper_size);
if (mmapper_size == 0) {
printk(KERN_ERR "mmapper_init - find_iomem failed\n");
- goto out;
+ return -ENODEV;
}
+ p_buf = __pa(v_buf);
err = misc_register(&mmapper_dev);
if (err) {
printk(KERN_ERR "mmapper - misc_register failed, err = %d\n",
err);
- goto out;
+		return err;
}
-
- p_buf = __pa(v_buf);
-out:
return 0;
}
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 6eabb7022a2..4949044773b 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -7,7 +7,6 @@
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -34,8 +33,6 @@ static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
static int rng_dev_open (struct inode *inode, struct file *filp)
{
- cycle_kernel_lock();
-
/* enforce read-only access to this chrdev */
if ((filp->f_mode & FMODE_READ) == 0)
return -EINVAL;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index cfb0010fa94..1a58ad89fdf 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
+obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
@@ -24,3 +25,5 @@ twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
+
+ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index eb0566e8331..20bb0e1ac68 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -16,6 +16,7 @@
*/
#include <linux/linkage.h>
+#include <asm/inst.h>
.text
@@ -122,103 +123,72 @@ ENTRY(aesni_set_key)
movups 0x10(%rsi), %xmm2 # other user key
movaps %xmm2, (%rcx)
add $0x10, %rcx
- # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
call _key_expansion_256a
- # aeskeygenassist $0x1, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
call _key_expansion_256a
- # aeskeygenassist $0x2, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
call _key_expansion_256a
- # aeskeygenassist $0x4, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
call _key_expansion_256a
- # aeskeygenassist $0x8, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
call _key_expansion_256a
- # aeskeygenassist $0x10, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
call _key_expansion_256a
- # aeskeygenassist $0x20, %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1
call _key_expansion_256b
- # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
call _key_expansion_256a
jmp .Ldec_key
.Lenc_key192:
movq 0x10(%rsi), %xmm2 # other user key
- # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
call _key_expansion_192a
- # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
call _key_expansion_192b
- # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
call _key_expansion_192a
- # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
call _key_expansion_192b
- # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
call _key_expansion_192a
- # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
call _key_expansion_192b
- # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
call _key_expansion_192a
- # aeskeygenassist $0x80, %xmm2, %xmm1 # round 8
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x80
+ AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8
call _key_expansion_192b
jmp .Ldec_key
.Lenc_key128:
- # aeskeygenassist $0x1, %xmm0, %xmm1 # round 1
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1
call _key_expansion_128
- # aeskeygenassist $0x2, %xmm0, %xmm1 # round 2
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2
call _key_expansion_128
- # aeskeygenassist $0x4, %xmm0, %xmm1 # round 3
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3
call _key_expansion_128
- # aeskeygenassist $0x8, %xmm0, %xmm1 # round 4
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4
call _key_expansion_128
- # aeskeygenassist $0x10, %xmm0, %xmm1 # round 5
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5
call _key_expansion_128
- # aeskeygenassist $0x20, %xmm0, %xmm1 # round 6
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6
call _key_expansion_128
- # aeskeygenassist $0x40, %xmm0, %xmm1 # round 7
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x40
+ AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7
call _key_expansion_128
- # aeskeygenassist $0x80, %xmm0, %xmm1 # round 8
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x80
+ AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8
call _key_expansion_128
- # aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x1b
+ AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9
call _key_expansion_128
- # aeskeygenassist $0x36, %xmm0, %xmm1 # round 10
- .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x36
+ AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10
call _key_expansion_128
.Ldec_key:
sub $0x10, %rcx
@@ -231,8 +201,7 @@ ENTRY(aesni_set_key)
.align 4
.Ldec_key_loop:
movaps (%rdi), %xmm0
- # aesimc %xmm0, %xmm1
- .byte 0x66, 0x0f, 0x38, 0xdb, 0xc8
+ AESIMC %xmm0 %xmm1
movaps %xmm1, (%rsi)
add $0x10, %rdi
sub $0x10, %rsi
@@ -274,51 +243,37 @@ _aesni_enc1:
je .Lenc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps -0x50(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
.align 4
.Lenc192:
movaps -0x40(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps -0x30(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
.align 4
.Lenc128:
movaps -0x20(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps -0x10(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps (TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x10(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x20(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x30(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x40(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x50(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x60(TKEYP), KEY
- # aesenc KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+ AESENC KEY STATE
movaps 0x70(TKEYP), KEY
- # aesenclast KEY, STATE # last round
- .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
+ AESENCLAST KEY STATE
ret
/*
@@ -353,135 +308,79 @@ _aesni_enc4:
je .L4enc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps -0x50(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
#.align 4
.L4enc192:
movaps -0x40(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps -0x30(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
#.align 4
.L4enc128:
movaps -0x20(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps -0x10(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps (TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x10(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x20(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x30(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x40(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x50(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x60(TKEYP), KEY
- # aesenc KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
- # aesenc KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
- # aesenc KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xea
- # aesenc KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+ AESENC KEY STATE1
+ AESENC KEY STATE2
+ AESENC KEY STATE3
+ AESENC KEY STATE4
movaps 0x70(TKEYP), KEY
- # aesenclast KEY, STATE1 # last round
- .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
- # aesenclast KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdd, 0xe2
- # aesenclast KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
- # aesenclast KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
+ AESENCLAST KEY STATE1 # last round
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
ret
/*
@@ -518,51 +417,37 @@ _aesni_dec1:
je .Ldec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps -0x50(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
.align 4
.Ldec192:
movaps -0x40(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps -0x30(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
.align 4
.Ldec128:
movaps -0x20(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps -0x10(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps (TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x10(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x20(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x30(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x40(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x50(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x60(TKEYP), KEY
- # aesdec KEY, STATE
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+ AESDEC KEY STATE
movaps 0x70(TKEYP), KEY
- # aesdeclast KEY, STATE # last round
- .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
+ AESDECLAST KEY STATE
ret
/*
@@ -597,135 +482,79 @@ _aesni_dec4:
je .L4dec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps -0x50(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
.align 4
.L4dec192:
movaps -0x40(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps -0x30(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
.align 4
.L4dec128:
movaps -0x20(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps -0x10(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps (TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x10(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x20(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x30(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x40(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x50(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x60(TKEYP), KEY
- # aesdec KEY, STATE1
- .byte 0x66, 0x0f, 0x38, 0xde, 0xc2
- # aesdec KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xde, 0xe2
- # aesdec KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xde, 0xea
- # aesdec KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+ AESDEC KEY STATE1
+ AESDEC KEY STATE2
+ AESDEC KEY STATE3
+ AESDEC KEY STATE4
movaps 0x70(TKEYP), KEY
- # aesdeclast KEY, STATE1 # last round
- .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
- # aesdeclast KEY, STATE2
- .byte 0x66, 0x0f, 0x38, 0xdf, 0xe2
- # aesdeclast KEY, STATE3
- .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
- # aesdeclast KEY, STATE4
- .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
+ AESDECLAST KEY STATE1 # last round
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
ret
/*
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
new file mode 100644
index 00000000000..1eb7f90cb7b
--- /dev/null
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -0,0 +1,157 @@
+/*
+ * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
+ * instructions. This file contains accelerated part of ghash
+ * implementation. More information about PCLMULQDQ can be found at:
+ *
+ * http://software.intel.com/en-us/articles/carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/
+ *
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ * Vinodh Gopal
+ * Erdinc Ozturk
+ * Deniz Karakoyunlu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+.data
+
+.align 16
+.Lbswap_mask:
+ .octa 0x000102030405060708090a0b0c0d0e0f
+.Lpoly:
+ .octa 0xc2000000000000000000000000000001
+.Ltwo_one:
+ .octa 0x00000001000000000000000000000001
+
+#define DATA %xmm0
+#define SHASH %xmm1
+#define T1 %xmm2
+#define T2 %xmm3
+#define T3 %xmm4
+#define BSWAP %xmm5
+#define IN1 %xmm6
+
+.text
+
+/*
+ * __clmul_gf128mul_ble: internal ABI
+ * input:
+ * DATA: operand1
+ * SHASH: operand2, hash_key << 1 mod poly
+ * output:
+ * DATA: operand1 * operand2 mod poly
+ * changed:
+ * T1
+ * T2
+ * T3
+ */
+__clmul_gf128mul_ble:
+ movaps DATA, T1
+ pshufd $0b01001110, DATA, T2
+ pshufd $0b01001110, SHASH, T3
+ pxor DATA, T2
+ pxor SHASH, T3
+
+ PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0
+ PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
+ PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0)
+ pxor DATA, T2
+ pxor T1, T2 # T2 = a0 * b1 + a1 * b0
+
+ movaps T2, T3
+ pslldq $8, T3
+ psrldq $8, T2
+ pxor T3, DATA
+ pxor T2, T1 # <T1:DATA> is result of
+ # carry-less multiplication
+
+ # first phase of the reduction
+ movaps DATA, T3
+ psllq $1, T3
+ pxor DATA, T3
+ psllq $5, T3
+ pxor DATA, T3
+ psllq $57, T3
+ movaps T3, T2
+ pslldq $8, T2
+ psrldq $8, T3
+ pxor T2, DATA
+ pxor T3, T1
+
+ # second phase of the reduction
+ movaps DATA, T2
+ psrlq $5, T2
+ pxor DATA, T2
+ psrlq $1, T2
+ pxor DATA, T2
+ psrlq $1, T2
+ pxor T2, T1
+ pxor T1, DATA
+ ret
+
+/* void clmul_ghash_mul(char *dst, const be128 *shash) */
+ENTRY(clmul_ghash_mul)
+ movups (%rdi), DATA
+ movups (%rsi), SHASH
+ movaps .Lbswap_mask, BSWAP
+ PSHUFB_XMM BSWAP DATA
+ call __clmul_gf128mul_ble
+ PSHUFB_XMM BSWAP DATA
+ movups DATA, (%rdi)
+ ret
+
+/*
+ * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ * const be128 *shash);
+ */
+ENTRY(clmul_ghash_update)
+ cmp $16, %rdx
+ jb .Lupdate_just_ret # check length
+ movaps .Lbswap_mask, BSWAP
+ movups (%rdi), DATA
+ movups (%rcx), SHASH
+ PSHUFB_XMM BSWAP DATA
+.align 4
+.Lupdate_loop:
+ movups (%rsi), IN1
+ PSHUFB_XMM BSWAP IN1
+ pxor IN1, DATA
+ call __clmul_gf128mul_ble
+ sub $16, %rdx
+ add $16, %rsi
+ cmp $16, %rdx
+ jge .Lupdate_loop
+ PSHUFB_XMM BSWAP DATA
+ movups DATA, (%rdi)
+.Lupdate_just_ret:
+ ret
+
+/*
+ * void clmul_ghash_setkey(be128 *shash, const u8 *key);
+ *
+ * Calculate hash_key << 1 mod poly
+ */
+ENTRY(clmul_ghash_setkey)
+ movaps .Lbswap_mask, BSWAP
+ movups (%rsi), %xmm0
+ PSHUFB_XMM BSWAP %xmm0
+ movaps %xmm0, %xmm1
+ psllq $1, %xmm0
+ psrlq $63, %xmm1
+ movaps %xmm1, %xmm2
+ pslldq $8, %xmm1
+ psrldq $8, %xmm2
+ por %xmm1, %xmm0
+ # reduction
+ pshufd $0b00100100, %xmm2, %xmm1
+ pcmpeqd .Ltwo_one, %xmm1
+ pand .Lpoly, %xmm1
+ pxor %xmm1, %xmm0
+ movups %xmm0, (%rdi)
+ ret
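The three PCLMULQDQ operations in __clmul_gf128mul_ble above implement a Karatsuba-style carry-less multiply: only a0*b0, a1*b1 and (a0^a1)*(b0^b1) are computed, and the middle term a0*b1 ^ a1*b0 is recovered by XOR. A rough C sketch of that decomposition, using a hypothetical clmul64() helper in place of one PCLMULQDQ and leaving out the two reduction phases, might read:

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/* hypothetical helper: 64x64 -> 128-bit carry-less multiply,
 * the job of a single PCLMULQDQ */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };
	int i;

	for (i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}

/*
 * 128x128 -> 256-bit carry-less product from three clmul64() calls,
 * mirroring the a0*b0 / a1*b1 / (a0^a1)*(b0^b1) split above.
 * res[0] holds bits 0..127, res[1] bits 128..255.
 */
static void clmul128(struct u128 a, struct u128 b, struct u128 res[2])
{
	struct u128 lo  = clmul64(a.lo, b.lo);			/* a0 * b0 */
	struct u128 hi  = clmul64(a.hi, b.hi);			/* a1 * b1 */
	struct u128 mid = clmul64(a.lo ^ a.hi, b.lo ^ b.hi);

	mid.lo ^= lo.lo ^ hi.lo;	/* mid = a0*b1 ^ a1*b0 */
	mid.hi ^= lo.hi ^ hi.hi;

	res[0].lo = lo.lo;
	res[0].hi = lo.hi ^ mid.lo;	/* middle term shifted up 64 bits */
	res[1].lo = hi.lo ^ mid.hi;
	res[1].hi = hi.hi;
}

The two reduction phases in the assembly then fold this 256-bit product back into 128 bits modulo the GHASH field polynomial (the .Lpoly constant above), which this sketch leaves out.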
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
new file mode 100644
index 00000000000..cbcc8d8ea93
--- /dev/null
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -0,0 +1,333 @@
+/*
+ * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
+ * instructions. This file contains glue code.
+ *
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/cryptd.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/hash.h>
+#include <asm/i387.h>
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+void clmul_ghash_mul(char *dst, const be128 *shash);
+
+void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ const be128 *shash);
+
+void clmul_ghash_setkey(be128 *shash, const u8 *key);
+
+struct ghash_async_ctx {
+ struct cryptd_ahash *cryptd_tfm;
+};
+
+struct ghash_ctx {
+ be128 shash;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx, 0, sizeof(*dctx));
+
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ clmul_ghash_setkey(&ctx->shash, key);
+
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *dst = dctx->buffer;
+
+ kernel_fpu_begin();
+ if (dctx->bytes) {
+ int n = min(srclen, dctx->bytes);
+ u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ dctx->bytes -= n;
+ srclen -= n;
+
+ while (n--)
+ *pos++ ^= *src++;
+
+ if (!dctx->bytes)
+ clmul_ghash_mul(dst, &ctx->shash);
+ }
+
+ clmul_ghash_update(dst, src, srclen, &ctx->shash);
+ kernel_fpu_end();
+
+ if (srclen & 0xf) {
+ src += srclen - (srclen & 0xf);
+ srclen &= 0xf;
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ while (srclen--)
+ *dst++ ^= *src++;
+ }
+
+ return 0;
+}
+
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ while (dctx->bytes--)
+ *tmp++ ^= 0;
+
+ kernel_fpu_begin();
+ clmul_ghash_mul(dst, &ctx->shash);
+ kernel_fpu_end();
+ }
+
+ dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *buf = dctx->buffer;
+
+ ghash_flush(ctx, dctx);
+ memcpy(dst, buf, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "__ghash",
+ .cra_driver_name = "__ghash-pclmulqdqni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+ },
+};
+
+static int ghash_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ if (!irq_fpu_usable()) {
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_init(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
+
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return crypto_shash_init(desc);
+ }
+}
+
+static int ghash_async_update(struct ahash_request *req)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+
+ if (!irq_fpu_usable()) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_update(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ return shash_ahash_update(req, desc);
+ }
+}
+
+static int ghash_async_final(struct ahash_request *req)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+
+ if (!irq_fpu_usable()) {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_final(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ return crypto_shash_final(desc, req->result);
+ }
+}
+
+static int ghash_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+
+ if (!irq_fpu_usable()) {
+ memcpy(cryptd_req, req, sizeof(*req));
+ ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
+ return crypto_ahash_digest(cryptd_req);
+ } else {
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
+
+ desc->tfm = child;
+ desc->flags = req->base.flags;
+ return shash_ahash_digest(req, desc);
+ }
+}
+
+static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_ahash *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
+ & CRYPTO_TFM_RES_MASK);
+
+ return 0;
+}
+
+static int ghash_async_init_tfm(struct crypto_tfm *tfm)
+{
+ struct cryptd_ahash *cryptd_tfm;
+ struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ctx->cryptd_tfm = cryptd_tfm;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(&cryptd_tfm->base));
+
+ return 0;
+}
+
+static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ cryptd_free_ahash(ctx->cryptd_tfm);
+}
+
+static struct ahash_alg ghash_async_alg = {
+ .init = ghash_async_init,
+ .update = ghash_async_update,
+ .final = ghash_async_final,
+ .setkey = ghash_async_setkey,
+ .digest = ghash_async_digest,
+ .halg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-clmulni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_async_alg.halg.base.cra_list),
+ .cra_init = ghash_async_init_tfm,
+ .cra_exit = ghash_async_exit_tfm,
+ },
+ },
+};
+
+static int __init ghash_pclmulqdqni_mod_init(void)
+{
+ int err;
+
+ if (!cpu_has_pclmulqdq) {
+ printk(KERN_INFO "Intel PCLMULQDQ-NI instructions are not"
+ " detected.\n");
+ return -ENODEV;
+ }
+
+ err = crypto_register_shash(&ghash_alg);
+ if (err)
+ goto err_out;
+ err = crypto_register_ahash(&ghash_async_alg);
+ if (err)
+ goto err_shash;
+
+ return 0;
+
+err_shash:
+ crypto_unregister_shash(&ghash_alg);
+err_out:
+ return err;
+}
+
+static void __exit ghash_pclmulqdqni_mod_exit(void)
+{
+ crypto_unregister_ahash(&ghash_async_alg);
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_pclmulqdqni_mod_init);
+module_exit(ghash_pclmulqdqni_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
+ "accelerated by PCLMULQDQ-NI");
+MODULE_ALIAS("ghash");
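
For reference, the 16-byte block multiply that clmul_ghash_mul() and clmul_ghash_update() accelerate is the GF(2^128) multiplication defined for GHASH in NIST SP 800-38D; the "hash_key << 1 mod poly" step in clmul_ghash_setkey() only pre-conditions the key for the bit-reflected form the PCLMULQDQ code works in. The sketch below is a plain-C userspace illustration of that field multiply, not part of the patch; gf128_mul() and its operand order are choices of this example rather than the driver's API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Bit-serial multiply in GF(2^128) as specified for GHASH in NIST
 * SP 800-38D: z = x * h, where all operands are 16-byte blocks with
 * the GCM bit ordering (bit 0 is the most significant bit of byte 0).
 */
static void gf128_mul(uint8_t z[16], const uint8_t x[16], const uint8_t h[16])
{
	uint8_t v[16], r[16];
	int i, j, k, carry;

	memcpy(v, h, 16);
	memset(r, 0, 16);

	for (i = 0; i < 16; i++) {
		for (j = 7; j >= 0; j--) {
			if (x[i] & (1 << j))
				for (k = 0; k < 16; k++)
					r[k] ^= v[k];
			/* v >>= 1; on carry, reduce with R = 0xE1 || 0^120 */
			carry = v[15] & 1;
			for (k = 15; k > 0; k--)
				v[k] = (v[k] >> 1) | (v[k - 1] << 7);
			v[0] >>= 1;
			if (carry)
				v[0] ^= 0xE1;
		}
	}
	memcpy(z, r, 16);
}

int main(void)
{
	/* The multiplicative identity has only bit 0 set, so x * h == x
	 * when h = 0x80 00 ... 00; a cheap self-check of the routine. */
	uint8_t h[16] = { 0x80 }, x[16], y[16];
	int i;

	for (i = 0; i < 16; i++)
		x[i] = i;
	gf128_mul(y, x, h);
	printf("identity check: %s\n", memcmp(x, y, 16) ? "FAIL" : "ok");
	return 0;
}
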
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 9cfc88b9774..613700f27a4 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -248,6 +248,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
+#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1c22cb05ad6..5d89fd2a369 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -65,11 +65,12 @@
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
+extern u8 hpet_blockid;
extern int hpet_force_user;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
-extern unsigned long hpet_readl(unsigned long a);
+extern unsigned int hpet_readl(unsigned int a);
extern void force_hpet_resume(void);
extern void hpet_msi_unmask(unsigned int irq);
@@ -78,9 +79,9 @@ extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
-extern int arch_setup_hpet_msi(unsigned int irq);
+extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
#else
-static inline int arch_setup_hpet_msi(unsigned int irq)
+static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
return -EINVAL;
}
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 0b20bbb758f..ebfb8a9e11f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -10,6 +10,8 @@
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H
+#ifndef __ASSEMBLY__
+
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
@@ -411,4 +413,9 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
}
}
+#endif /* __ASSEMBLY__ */
+
+#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
+#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
+
#endif /* _ASM_X86_I387_H */
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
new file mode 100644
index 00000000000..14cf526091f
--- /dev/null
+++ b/arch/x86/include/asm/inst.h
@@ -0,0 +1,150 @@
+/*
+ * Generate .byte code for some instructions not supported by old
+ * binutils.
+ */
+#ifndef X86_ASM_INST_H
+#define X86_ASM_INST_H
+
+#ifdef __ASSEMBLY__
+
+ .macro XMM_NUM opd xmm
+ .ifc \xmm,%xmm0
+ \opd = 0
+ .endif
+ .ifc \xmm,%xmm1
+ \opd = 1
+ .endif
+ .ifc \xmm,%xmm2
+ \opd = 2
+ .endif
+ .ifc \xmm,%xmm3
+ \opd = 3
+ .endif
+ .ifc \xmm,%xmm4
+ \opd = 4
+ .endif
+ .ifc \xmm,%xmm5
+ \opd = 5
+ .endif
+ .ifc \xmm,%xmm6
+ \opd = 6
+ .endif
+ .ifc \xmm,%xmm7
+ \opd = 7
+ .endif
+ .ifc \xmm,%xmm8
+ \opd = 8
+ .endif
+ .ifc \xmm,%xmm9
+ \opd = 9
+ .endif
+ .ifc \xmm,%xmm10
+ \opd = 10
+ .endif
+ .ifc \xmm,%xmm11
+ \opd = 11
+ .endif
+ .ifc \xmm,%xmm12
+ \opd = 12
+ .endif
+ .ifc \xmm,%xmm13
+ \opd = 13
+ .endif
+ .ifc \xmm,%xmm14
+ \opd = 14
+ .endif
+ .ifc \xmm,%xmm15
+ \opd = 15
+ .endif
+ .endm
+
+ .macro PFX_OPD_SIZE
+ .byte 0x66
+ .endm
+
+ .macro PFX_REX opd1 opd2
+ .if (\opd1 | \opd2) & 8
+ .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+ .endif
+ .endm
+
+ .macro MODRM mod opd1 opd2
+ .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
+ .endm
+
+ .macro PSHUFB_XMM xmm1 xmm2
+ XMM_NUM pshufb_opd1 \xmm1
+ XMM_NUM pshufb_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX pshufb_opd1 pshufb_opd2
+ .byte 0x0f, 0x38, 0x00
+ MODRM 0xc0 pshufb_opd1 pshufb_opd2
+ .endm
+
+ .macro PCLMULQDQ imm8 xmm1 xmm2
+ XMM_NUM clmul_opd1 \xmm1
+ XMM_NUM clmul_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX clmul_opd1 clmul_opd2
+ .byte 0x0f, 0x3a, 0x44
+ MODRM 0xc0 clmul_opd1 clmul_opd2
+ .byte \imm8
+ .endm
+
+ .macro AESKEYGENASSIST rcon xmm1 xmm2
+ XMM_NUM aeskeygen_opd1 \xmm1
+ XMM_NUM aeskeygen_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aeskeygen_opd1 aeskeygen_opd2
+ .byte 0x0f, 0x3a, 0xdf
+ MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
+ .byte \rcon
+ .endm
+
+ .macro AESIMC xmm1 xmm2
+ XMM_NUM aesimc_opd1 \xmm1
+ XMM_NUM aesimc_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesimc_opd1 aesimc_opd2
+ .byte 0x0f, 0x38, 0xdb
+ MODRM 0xc0 aesimc_opd1 aesimc_opd2
+ .endm
+
+ .macro AESENC xmm1 xmm2
+ XMM_NUM aesenc_opd1 \xmm1
+ XMM_NUM aesenc_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesenc_opd1 aesenc_opd2
+ .byte 0x0f, 0x38, 0xdc
+ MODRM 0xc0 aesenc_opd1 aesenc_opd2
+ .endm
+
+ .macro AESENCLAST xmm1 xmm2
+ XMM_NUM aesenclast_opd1 \xmm1
+ XMM_NUM aesenclast_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesenclast_opd1 aesenclast_opd2
+ .byte 0x0f, 0x38, 0xdd
+ MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
+ .endm
+
+ .macro AESDEC xmm1 xmm2
+ XMM_NUM aesdec_opd1 \xmm1
+ XMM_NUM aesdec_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesdec_opd1 aesdec_opd2
+ .byte 0x0f, 0x38, 0xde
+ MODRM 0xc0 aesdec_opd1 aesdec_opd2
+ .endm
+
+ .macro AESDECLAST xmm1 xmm2
+ XMM_NUM aesdeclast_opd1 \xmm1
+ XMM_NUM aesdeclast_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesdeclast_opd1 aesdeclast_opd2
+ .byte 0x0f, 0x38, 0xdf
+ MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
+ .endm
+#endif
+
+#endif
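
The macros above emit raw .byte sequences so that PSHUFB, PCLMULQDQ and the AES-NI instructions still assemble with binutils releases that do not know the mnemonics; only the ModRM byte (and the optional REX prefix for %xmm8-%xmm15) depends on the register operands. As a standalone illustration, not part of the patch, the same formula the MODRM macro uses reproduces the final 0xc5 of the PSHUFB_XMM5_XMM0 sequence added to i387.h above, with %xmm5 as the source and %xmm0 as the destination:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the MODRM macro: mod | (opd1 & 7) | ((opd2 & 7) << 3) */
static uint8_t modrm(uint8_t mod, uint8_t opd1, uint8_t opd2)
{
	return mod | (opd1 & 7) | ((opd2 & 7) << 3);
}

int main(void)
{
	/* PSHUFB_XMM %xmm5 %xmm0: opd1 = 5 (source), opd2 = 0 (destination) */
	printf("pshufb %%xmm5, %%xmm0 -> 66 0f 38 00 %02x\n", modrm(0xc0, 5, 0));
	return 0;
}
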
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 67e929b8987..87eee07da21 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -624,6 +624,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
}
hpet_address = hpet_tbl->address.address;
+ hpet_blockid = hpet_tbl->sequence;
/*
* Some broken BIOSes advertise HPET at 0x0. We really do not
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ad8c75b9e45..efb2b9cd132 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -647,7 +647,7 @@ static int __init calibrate_APIC_clock(void)
calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
- apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
+ apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
calibration_result);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c0b4468683f..d5d498fbee4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3267,7 +3267,8 @@ void destroy_irq(unsigned int irq)
* MSI message composition
*/
#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+ struct msi_msg *msg, u8 hpet_id)
{
struct irq_cfg *cfg;
int err;
@@ -3301,7 +3302,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
irte.dest_id = IRTE_DEST(dest);
/* Set source-id of interrupt request */
- set_msi_sid(&irte, pdev);
+ if (pdev)
+ set_msi_sid(&irte, pdev);
+ else
+ set_hpet_sid(&irte, hpet_id);
modify_irte(irq, &irte);
@@ -3466,7 +3470,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
int ret;
struct msi_msg msg;
- ret = msi_compose_msg(dev, irq, &msg);
+ ret = msi_compose_msg(dev, irq, &msg, -1);
if (ret < 0)
return ret;
@@ -3599,7 +3603,7 @@ int arch_setup_dmar_msi(unsigned int irq)
int ret;
struct msi_msg msg;
- ret = msi_compose_msg(NULL, irq, &msg);
+ ret = msi_compose_msg(NULL, irq, &msg, -1);
if (ret < 0)
return ret;
dmar_msi_write(irq, &msg);
@@ -3639,6 +3643,19 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
#endif /* CONFIG_SMP */
+static struct irq_chip ir_hpet_msi_type = {
+ .name = "IR-HPET_MSI",
+ .unmask = hpet_msi_unmask,
+ .mask = hpet_msi_mask,
+#ifdef CONFIG_INTR_REMAP
+ .ack = ir_ack_apic_edge,
+#ifdef CONFIG_SMP
+ .set_affinity = ir_set_msi_irq_affinity,
+#endif
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
static struct irq_chip hpet_msi_type = {
.name = "HPET_MSI",
.unmask = hpet_msi_unmask,
@@ -3650,20 +3667,36 @@ static struct irq_chip hpet_msi_type = {
.retrigger = ioapic_retrigger_irq,
};
-int arch_setup_hpet_msi(unsigned int irq)
+int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
int ret;
struct msi_msg msg;
struct irq_desc *desc = irq_to_desc(irq);
- ret = msi_compose_msg(NULL, irq, &msg);
+ if (intr_remapping_enabled) {
+ struct intel_iommu *iommu = map_hpet_to_ir(id);
+ int index;
+
+ if (!iommu)
+ return -1;
+
+ index = alloc_irte(iommu, irq, 1);
+ if (index < 0)
+ return -1;
+ }
+
+ ret = msi_compose_msg(NULL, irq, &msg, id);
if (ret < 0)
return ret;
hpet_msi_write(irq, &msg);
desc->status |= IRQ_MOVE_PCNTXT;
- set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
- "edge");
+ if (irq_remapped(irq))
+ set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
+ handle_edge_irq, "edge");
+ else
+ set_irq_chip_and_handler_name(irq, &hpet_msi_type,
+ handle_edge_irq, "edge");
return 0;
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dedc2bddf7a..ba6e6588460 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -33,6 +33,7 @@
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
unsigned long hpet_address;
+u8 hpet_blockid; /* OS timer block num */
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
@@ -47,12 +48,12 @@ struct hpet_dev {
char name[10];
};
-unsigned long hpet_readl(unsigned long a)
+inline unsigned int hpet_readl(unsigned int a)
{
return readl(hpet_virt_address + a);
}
-static inline void hpet_writel(unsigned long d, unsigned long a)
+static inline void hpet_writel(unsigned int d, unsigned int a)
{
writel(d, hpet_virt_address + a);
}
@@ -167,7 +168,7 @@ do { \
static void hpet_reserve_msi_timers(struct hpet_data *hd);
-static void hpet_reserve_platform_timers(unsigned long id)
+static void hpet_reserve_platform_timers(unsigned int id)
{
struct hpet __iomem *hpet = hpet_virt_address;
struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
@@ -205,7 +206,7 @@ static void hpet_reserve_platform_timers(unsigned long id)
}
#else
-static void hpet_reserve_platform_timers(unsigned long id) { }
+static void hpet_reserve_platform_timers(unsigned int id) { }
#endif
/*
@@ -246,7 +247,7 @@ static void hpet_reset_counter(void)
static void hpet_start_counter(void)
{
- unsigned long cfg = hpet_readl(HPET_CFG);
+ unsigned int cfg = hpet_readl(HPET_CFG);
cfg |= HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
}
@@ -271,7 +272,7 @@ static void hpet_resume_counter(void)
static void hpet_enable_legacy_int(void)
{
- unsigned long cfg = hpet_readl(HPET_CFG);
+ unsigned int cfg = hpet_readl(HPET_CFG);
cfg |= HPET_CFG_LEGACY;
hpet_writel(cfg, HPET_CFG);
@@ -314,7 +315,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
static void hpet_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt, int timer)
{
- unsigned long cfg, cmp, now;
+ unsigned int cfg, cmp, now;
uint64_t delta;
switch (mode) {
@@ -323,7 +324,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
delta >>= evt->shift;
now = hpet_readl(HPET_COUNTER);
- cmp = now + (unsigned long) delta;
+ cmp = now + (unsigned int) delta;
cfg = hpet_readl(HPET_Tn_CFG(timer));
/* Make sure we use edge triggered interrupts */
cfg &= ~HPET_TN_LEVEL;
@@ -339,7 +340,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
* (See AMD-8111 HyperTransport I/O Hub Data Sheet,
* Publication # 24674)
*/
- hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
+ hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
hpet_start_counter();
hpet_print_config();
break;
@@ -383,13 +384,24 @@ static int hpet_next_event(unsigned long delta,
hpet_writel(cnt, HPET_Tn_CMP(timer));
/*
- * We need to read back the CMP register to make sure that
- * what we wrote hit the chip before we compare it to the
- * counter.
+ * We need to read back the CMP register on certain HPET
+ * implementations (ATI chipsets) which seem to delay the
+ * transfer of the compare register into the internal compare
+ * logic. With small deltas this might actually be too late as
+ * the counter could already be higher than the compare value
+ * at that point and we would wait for the next hpet interrupt
+ * forever. We found out that reading the CMP register back
+ * forces the transfer so we can rely on the comparison with
+ * the counter register below. If the read back from the
+ * compare register does not match the value we programmed
+ * then we might have a real hardware problem. We can not do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
*/
- WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
+ WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+ KERN_WARNING "hpet: compare register read back failed.\n");
- return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
+ return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
static void hpet_legacy_set_mode(enum clock_event_mode mode,
@@ -415,7 +427,7 @@ static struct hpet_dev *hpet_devs;
void hpet_msi_unmask(unsigned int irq)
{
struct hpet_dev *hdev = get_irq_data(irq);
- unsigned long cfg;
+ unsigned int cfg;
/* unmask it */
cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
@@ -425,7 +437,7 @@ void hpet_msi_unmask(unsigned int irq)
void hpet_msi_mask(unsigned int irq)
{
- unsigned long cfg;
+ unsigned int cfg;
struct hpet_dev *hdev = get_irq_data(irq);
/* mask it */
@@ -467,7 +479,7 @@ static int hpet_msi_next_event(unsigned long delta,
static int hpet_setup_msi_irq(unsigned int irq)
{
- if (arch_setup_hpet_msi(irq)) {
+ if (arch_setup_hpet_msi(irq, hpet_blockid)) {
destroy_irq(irq);
return -EINVAL;
}
@@ -584,6 +596,8 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
unsigned int num_timers_used = 0;
int i;
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return;
id = hpet_readl(HPET_ID);
num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
@@ -598,7 +612,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
struct hpet_dev *hdev = &hpet_devs[num_timers_used];
- unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
+ unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));
/* Only consider HPET timer with MSI support */
if (!(cfg & HPET_TN_FSB_CAP))
@@ -813,7 +827,7 @@ static int hpet_clocksource_register(void)
*/
int __init hpet_enable(void)
{
- unsigned long id;
+ unsigned int id;
int i;
if (!is_hpet_capable())
@@ -872,10 +886,8 @@ int __init hpet_enable(void)
if (id & HPET_ID_LEGSUP) {
hpet_legacy_clockevent_register();
- hpet_msi_capability_lookup(2);
return 1;
}
- hpet_msi_capability_lookup(0);
return 0;
out_nohpet:
@@ -908,9 +920,17 @@ static __init int hpet_late_init(void)
if (!hpet_virt_address)
return -ENODEV;
+ if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
+ hpet_msi_capability_lookup(2);
+ else
+ hpet_msi_capability_lookup(0);
+
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
hpet_print_config();
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return 0;
+
for_each_online_cpu(cpu) {
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
}
@@ -925,7 +945,7 @@ fs_initcall(hpet_late_init);
void hpet_disable(void)
{
if (is_hpet_capable()) {
- unsigned long cfg = hpet_readl(HPET_CFG);
+ unsigned int cfg = hpet_readl(HPET_CFG);
if (hpet_legacy_int_enabled) {
cfg &= ~HPET_CFG_LEGACY;
@@ -965,8 +985,8 @@ static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
-static unsigned long hpet_default_delta;
-static unsigned long hpet_pie_delta;
+static u32 hpet_default_delta;
+static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;
static rtc_irq_handler irq_handler;
@@ -1017,7 +1037,8 @@ EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
*/
int hpet_rtc_timer_init(void)
{
- unsigned long cfg, cnt, delta, flags;
+ unsigned int cfg, cnt, delta;
+ unsigned long flags;
if (!is_hpet_enabled())
return 0;
@@ -1027,7 +1048,7 @@ int hpet_rtc_timer_init(void)
clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
- hpet_default_delta = (unsigned long) clc;
+ hpet_default_delta = clc;
}
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
@@ -1113,7 +1134,7 @@ int hpet_set_periodic_freq(unsigned long freq)
clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
do_div(clc, freq);
clc >>= hpet_clockevent.shift;
- hpet_pie_delta = (unsigned long) clc;
+ hpet_pie_delta = clc;
}
return 1;
}
@@ -1127,7 +1148,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
static void hpet_rtc_timer_reinit(void)
{
- unsigned long cfg, delta;
+ unsigned int cfg, delta;
int lost_ints = -1;
if (unlikely(!hpet_rtc_flags)) {
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 611b9e2360d..74c92bb194d 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void)
evt->min_delta_ns = clockevent_delta2ns(1, evt);
evt->cpumask = cpumask_of(cpu);
- printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n",
+ printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n",
evt->name, evt->mult, evt->shift);
clockevents_register_device(evt);
}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e02d92d12bc..9055e5872ff 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -73,7 +73,8 @@ void update_vsyscall_tz(void)
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
+ u32 mult)
{
unsigned long flags;
@@ -82,7 +83,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.clock.vread = clock->vread;
vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
vsyscall_gtod_data.clock.mask = clock->mask;
- vsyscall_gtod_data.clock.mult = clock->mult;
+ vsyscall_gtod_data.clock.mult = mult;
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 26b5dd0cb56..81c185a6971 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -440,6 +440,15 @@ config CRYPTO_WP512
See also:
<http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+config CRYPTO_GHASH_CLMUL_NI_INTEL
+ tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
+ depends on (X86 || UML_X86) && 64BIT
+ select CRYPTO_SHASH
+ select CRYPTO_CRYPTD
+ help
+ GHASH is a message digest algorithm for GCM (Galois/Counter Mode).
+ The implementation is accelerated by Intel's CLMUL-NI instructions.
+
comment "Ciphers"
config CRYPTO_AES
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 3aa6e3834bf..2bc33214284 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
* Returns DEFAULT_BLK_SZ bytes of random data per call
* returns 0 if generation succeeded, <0 if something went wrong
*/
-static int _get_more_prng_bytes(struct prng_context *ctx)
+static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
int i;
unsigned char tmp[DEFAULT_BLK_SZ];
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data,
DEFAULT_BLK_SZ)) {
- if (fips_enabled) {
+ if (cont_test) {
panic("cprng %p Failed repetition check!\n",
ctx);
}
@@ -185,16 +185,14 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
}
/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
+static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
+ int do_cont_test)
{
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
- if (nbytes < 0)
- return -EINVAL;
-
spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
@@ -220,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -247,7 +245,7 @@ empty_rbuf:
*/
for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -356,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
{
struct prng_context *prng = crypto_rng_ctx(tfm);
- return get_prng_bytes(rdata, dlen, prng);
+ return get_prng_bytes(rdata, dlen, prng, 0);
}
/*
@@ -404,19 +402,79 @@ static struct crypto_alg rng_alg = {
}
};
+#ifdef CONFIG_CRYPTO_FIPS
+static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen)
+{
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ return get_prng_bytes(rdata, dlen, prng, 1);
+}
+
+static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+ u8 rdata[DEFAULT_BLK_SZ];
+ int rc;
+
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ rc = cprng_reset(tfm, seed, slen);
+
+ if (!rc)
+ goto out;
+
+ /* this primes our continuity test */
+ rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
+ prng->rand_data_valid = DEFAULT_BLK_SZ;
+
+out:
+ return rc;
+}
+
+static struct crypto_alg fips_rng_alg = {
+ .cra_name = "fips(ansi_cprng)",
+ .cra_driver_name = "fips_ansi_cprng",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct prng_context),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
+ .cra_init = cprng_init,
+ .cra_exit = cprng_exit,
+ .cra_u = {
+ .rng = {
+ .rng_make_random = fips_cprng_get_random,
+ .rng_reset = fips_cprng_reset,
+ .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
+ }
+ }
+};
+#endif
/* Module initialization */
static int __init prng_mod_init(void)
{
- if (fips_enabled)
- rng_alg.cra_priority += 200;
+ int rc = 0;
- return crypto_register_alg(&rng_alg);
+ rc = crypto_register_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ if (rc)
+ goto out;
+
+ rc = crypto_register_alg(&fips_rng_alg);
+
+out:
+#endif
+ return rc;
}
static void __exit prng_mod_fini(void)
{
crypto_unregister_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ crypto_unregister_alg(&fips_rng_alg);
+#endif
return;
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 35335825a4e..f8ae0d94a64 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -711,6 +711,13 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
+
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
crypto_free_ahash(&tfm->base);
diff --git a/crypto/digest.c b/crypto/digest.c
deleted file mode 100644
index 5d3f1303da9..00000000000
--- a/crypto/digest.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-
-#include "internal.h"
-
-static int init(struct hash_desc *desc)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-
- tfm->__crt_alg->cra_digest.dia_init(tfm);
- return 0;
-}
-
-static int update2(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
-
- if (!nbytes)
- return 0;
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int l = sg->length;
-
- if (unlikely(l > nbytes))
- l = nbytes;
- nbytes -= l;
-
- do {
- unsigned int bytes_from_page = min(l, ((unsigned int)
- (PAGE_SIZE)) -
- offset);
- char *src = crypto_kmap(pg, 0);
- char *p = src + offset;
-
- if (unlikely(offset & alignmask)) {
- unsigned int bytes =
- alignmask + 1 - (offset & alignmask);
- bytes = min(bytes, bytes_from_page);
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes);
- p += bytes;
- bytes_from_page -= bytes;
- l -= bytes;
- }
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes_from_page);
- crypto_kunmap(src, 0);
- crypto_yield(desc->flags);
- offset = 0;
- pg++;
- l -= bytes_from_page;
- } while (l > 0);
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
- }
-
- return 0;
-}
-
-static int update(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return update2(desc, sg, nbytes);
-}
-
-static int final(struct hash_desc *desc, u8 *out)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
-
- if (unlikely((unsigned long)out & alignmask)) {
- unsigned long align = alignmask + 1;
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
- u8 *dst = (u8 *)ALIGN(addr, align) +
- ALIGN(tfm->__crt_alg->cra_ctxsize, align);
-
- digest->dia_final(tfm, dst);
- memcpy(out, dst, digest->dia_digestsize);
- } else
- digest->dia_final(tfm, out);
-
- return 0;
-}
-
-static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
-{
- crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(hash);
-
- crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
- return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
-}
-
-static int digest(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
-
- init(desc);
- update2(desc, sg, nbytes);
- return final(desc, out);
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
- struct hash_tfm *ops = &tfm->crt_hash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- ops->init = init;
- ops->update = update;
- ops->final = final;
- ops->digest = digest;
- ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
- ops->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
-}
-
-static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return dalg->dia_setkey(tfm, key, keylen);
-}
-
-static int digest_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- dalg->dia_init(tfm);
- return 0;
-}
-
-static int digest_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- update(&desc, req->src, req->nbytes);
- return 0;
-}
-
-static int digest_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- final(&desc, req->result);
- return 0;
-}
-
-static int digest_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return digest(&desc, req->src, req->nbytes, req->result);
-}
-
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->init = digest_async_init;
- crt->update = digest_async_update;
- crt->final = digest_async_final;
- crt->digest = digest_async_digest;
- crt->setkey = dalg->dia_setkey ? digest_async_setkey :
- digest_async_nosetkey;
- crt->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
diff --git a/crypto/hash.c b/crypto/hash.c
deleted file mode 100644
index cb86b19fd10..00000000000
--- a/crypto/hash.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Cryptographic Hash operations.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#include "internal.h"
-
-static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = alg->setkey(crt, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
- return ret;
-}
-
-static int hash_setkey(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
-
- if ((unsigned long)key & alignmask)
- return hash_setkey_unaligned(crt, key, keylen);
-
- return alg->setkey(crt, key, keylen);
-}
-
-static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- return alg->setkey(tfm_hash, key, keylen);
-}
-
-static int hash_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->init(&desc);
-}
-
-static int hash_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->update(&desc, req->src, req->nbytes);
-}
-
-static int hash_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->final(&desc, req->result);
-}
-
-static int hash_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->digest(&desc, req->src, req->nbytes, req->result);
-}
-
-static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = hash_async_init;
- crt->update = hash_async_update;
- crt->final = hash_async_final;
- crt->digest = hash_async_digest;
- crt->setkey = hash_async_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
-{
- struct hash_tfm *crt = &tfm->crt_hash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = hash_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_hash_ops_async(tfm);
- else
- return crypto_init_hash_ops_sync(tfm);
-}
-
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
- __attribute__ ((unused));
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
-{
- seq_printf(m, "type : hash\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
-}
-
-const struct crypto_type crypto_hash_type = {
- .ctxsize = crypto_hash_ctxsize,
- .init = crypto_init_hash_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_hash_show,
-#endif
-};
-EXPORT_SYMBOL_GPL(crypto_hash_type);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic cryptographic hash type");
diff --git a/crypto/proc.c b/crypto/proc.c
index 1c38733c224..58fef67d4f4 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -109,13 +109,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- seq_printf(m, "type : digest\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n",
- alg->cra_digest.dia_digestsize);
- break;
case CRYPTO_ALG_TYPE_COMPRESS:
seq_printf(m, "type : compression\n");
break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6d5b746637b..7620bfce92f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1201,7 +1201,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err, i, j, seedsize;
+ int err = 0, i, j, seedsize;
u8 *seed;
char result[32];
@@ -1943,6 +1943,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ghash",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = ghash_tv_template,
+ .count = GHASH_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "hmac(md5)",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 9963b18983a..fb765173d41 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = {
},
};
+#define GHASH_TEST_VECTORS 1
+
+static struct hash_testvec ghash_tv_template[] =
+{
+ {
+
+ .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+ .ksize = 16,
+ .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+ .psize = 16,
+ .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+ "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+ },
+};
+
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index a762283d2a2..e789e6c9a42 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -214,7 +214,7 @@ static void gdrom_spicommand(void *spi_string, int buflen)
gdrom_getsense(NULL);
return;
}
- outsw(PHYSADDR(GDROM_DATA_REG), cmd, 6);
+ outsw(GDROM_DATA_REG, cmd, 6);
}
@@ -298,7 +298,7 @@ static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session)
err = -EINVAL;
goto cleanup_readtoc;
}
- insw(PHYSADDR(GDROM_DATA_REG), toc, tocsize/2);
+ insw(GDROM_DATA_REG, toc, tocsize/2);
if (gd.status & 0x01)
err = -EINVAL;
@@ -449,7 +449,7 @@ static int gdrom_getsense(short *bufstring)
GDROM_DEFAULT_TIMEOUT);
if (gd.pending)
goto cleanup_sense;
- insw(PHYSADDR(GDROM_DATA_REG), &sense, sense_command->buflen/2);
+ insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
if (sense[1] & 40) {
printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
goto cleanup_sense;
@@ -586,7 +586,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
spin_unlock(&gdrom_lock);
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
- ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
+ ctrl_outl(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
ctrl_outl(1, GDROM_DMA_ENABLE_REG);
@@ -615,7 +615,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
cpu_relax();
gd.pending = 1;
gd.transfer = 1;
- outsw(PHYSADDR(GDROM_DATA_REG), &read_command->cmd, 6);
+ outsw(GDROM_DATA_REG, &read_command->cmd, 6);
timeout = jiffies + HZ / 2;
/* Wait for any pending DMA to finish */
while (ctrl_inb(GDROM_DMA_STATUS_REG) &&
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index a96f3197e60..43412c03969 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -676,25 +676,25 @@ static int agp_open(struct inode *inode, struct file *file)
int minor = iminor(inode);
struct agp_file_private *priv;
struct agp_client *client;
- int rc = -ENXIO;
-
- lock_kernel();
- mutex_lock(&(agp_fe.agp_mutex));
if (minor != AGPGART_MINOR)
- goto err_out;
+ return -ENXIO;
+
+ mutex_lock(&(agp_fe.agp_mutex));
priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
- if (priv == NULL)
- goto err_out_nomem;
+ if (priv == NULL) {
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -ENOMEM;
+ }
set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
priv->my_pid = current->pid;
- if (capable(CAP_SYS_RAWIO)) {
+ if (capable(CAP_SYS_RAWIO))
/* Root priv, can be controller */
set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
- }
+
client = agp_find_client_by_pid(current->pid);
if (client != NULL) {
@@ -704,16 +704,10 @@ static int agp_open(struct inode *inode, struct file *file)
file->private_data = (void *) priv;
agp_insert_file_private(priv);
DBG("private=%p, client=%p", priv, client);
- mutex_unlock(&(agp_fe.agp_mutex));
- unlock_kernel();
- return 0;
-err_out_nomem:
- rc = -ENOMEM;
-err_out:
mutex_unlock(&(agp_fe.agp_mutex));
- unlock_kernel();
- return rc;
+
+ return 0;
}
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index 04ba906b488..4d830dc482e 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -17,7 +17,7 @@
#include <linux/cdev.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <linux/smp_lock.h>
+
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -158,7 +158,6 @@ static int cs5535_gpio_open(struct inode *inode, struct file *file)
{
u32 m = iminor(inode);
- cycle_kernel_lock();
/* the mask says which pins are usable by this driver */
if ((mask & (1 << m)) == 0)
return -EINVAL;
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 34d15d54823..26a47dc88f6 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -27,8 +27,6 @@
* - Add module support
*/
-
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
@@ -174,13 +172,12 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
case RTC_RD_TIME:
- lock_kernel();
spin_lock_irqsave(&efi_rtc_lock, flags);
status = efi.get_time(&eft, &cap);
spin_unlock_irqrestore(&efi_rtc_lock,flags);
- unlock_kernel();
+
if (status != EFI_SUCCESS) {
/* should never happen */
printk(KERN_ERR "efitime: can't read time\n");
@@ -202,13 +199,11 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
convert_to_efi_time(&wtime, &eft);
- lock_kernel();
spin_lock_irqsave(&efi_rtc_lock, flags);
status = efi.set_time(&eft);
spin_unlock_irqrestore(&efi_rtc_lock,flags);
- unlock_kernel();
return status == EFI_SUCCESS ? 0 : -EINVAL;
@@ -224,7 +219,6 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
convert_to_efi_time(&wtime, &eft);
- lock_kernel();
spin_lock_irqsave(&efi_rtc_lock, flags);
/*
* XXX Fixme:
@@ -235,19 +229,16 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
spin_unlock_irqrestore(&efi_rtc_lock,flags);
- unlock_kernel();
return status == EFI_SUCCESS ? 0 : -EINVAL;
case RTC_WKALM_RD:
- lock_kernel();
spin_lock_irqsave(&efi_rtc_lock, flags);
status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
spin_unlock_irqrestore(&efi_rtc_lock,flags);
- unlock_kernel();
if (status != EFI_SUCCESS) return -EINVAL;
@@ -277,7 +268,6 @@ static int efi_rtc_open(struct inode *inode, struct file *file)
* We do accept multiple open files at the same time as we
* synchronize on the per call operation.
*/
- cycle_kernel_lock();
return 0;
}
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index ef31738c2cb..fda4181b5e6 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -19,7 +19,6 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
@@ -32,7 +31,6 @@ static ssize_t nvram_len;
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
- lock_kernel();
switch (origin) {
case 1:
offset += file->f_pos;
@@ -41,12 +39,11 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
offset += nvram_len;
break;
}
- if (offset < 0) {
- unlock_kernel();
+ if (offset < 0)
return -EINVAL;
- }
+
file->f_pos = offset;
- unlock_kernel();
+
return file->f_pos;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1573aebd54b..e989f67bb61 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,9 @@
static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
-
+static int data_avail;
+static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
+ __cacheline_aligned;
static inline int hwrng_init(struct hwrng *rng)
{
@@ -67,19 +69,6 @@ static inline void hwrng_cleanup(struct hwrng *rng)
rng->cleanup(rng);
}
-static inline int hwrng_data_present(struct hwrng *rng, int wait)
-{
- if (!rng->data_present)
- return 1;
- return rng->data_present(rng, wait);
-}
-
-static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
-{
- return rng->data_read(rng, data);
-}
-
-
static int rng_dev_open(struct inode *inode, struct file *filp)
{
/* enforce read-only access to this chrdev */
@@ -87,58 +76,90 @@ static int rng_dev_open(struct inode *inode, struct file *filp)
return -EINVAL;
if (filp->f_mode & FMODE_WRITE)
return -EINVAL;
- cycle_kernel_lock();
+ return 0;
+}
+
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ int wait) {
+ int present;
+
+ if (rng->read)
+ return rng->read(rng, (void *)buffer, size, wait);
+
+ if (rng->data_present)
+ present = rng->data_present(rng, wait);
+ else
+ present = 1;
+
+ if (present)
+ return rng->data_read(rng, (u32 *)buffer);
+
return 0;
}
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
size_t size, loff_t *offp)
{
- u32 data;
ssize_t ret = 0;
int err = 0;
- int bytes_read;
+ int bytes_read, len;
while (size) {
- err = -ERESTARTSYS;
- if (mutex_lock_interruptible(&rng_mutex))
+ if (mutex_lock_interruptible(&rng_mutex)) {
+ err = -ERESTARTSYS;
goto out;
+ }
+
if (!current_rng) {
- mutex_unlock(&rng_mutex);
err = -ENODEV;
- goto out;
+ goto out_unlock;
}
- bytes_read = 0;
- if (hwrng_data_present(current_rng,
- !(filp->f_flags & O_NONBLOCK)))
- bytes_read = hwrng_data_read(current_rng, &data);
- mutex_unlock(&rng_mutex);
-
- err = -EAGAIN;
- if (!bytes_read && (filp->f_flags & O_NONBLOCK))
- goto out;
- if (bytes_read < 0) {
- err = bytes_read;
- goto out;
+ if (!data_avail) {
+ bytes_read = rng_get_data(current_rng, rng_buffer,
+ sizeof(rng_buffer),
+ !(filp->f_flags & O_NONBLOCK));
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock;
+ }
+ data_avail = bytes_read;
}
- err = -EFAULT;
- while (bytes_read && size) {
- if (put_user((u8)data, buf++))
- goto out;
- size--;
- ret++;
- bytes_read--;
- data >>= 8;
+ if (!data_avail) {
+ if (filp->f_flags & O_NONBLOCK) {
+ err = -EAGAIN;
+ goto out_unlock;
+ }
+ } else {
+ len = data_avail;
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+ if (copy_to_user(buf + ret, rng_buffer + data_avail,
+ len)) {
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ size -= len;
+ ret += len;
}
+ mutex_unlock(&rng_mutex);
+
if (need_resched())
schedule_timeout_interruptible(1);
- err = -ERESTARTSYS;
- if (signal_pending(current))
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
goto out;
+ }
}
+out_unlock:
+ mutex_unlock(&rng_mutex);
out:
return ret ? : err;
}
@@ -280,7 +301,7 @@ int hwrng_register(struct hwrng *rng)
struct hwrng *old_rng, *tmp;
if (rng->name == NULL ||
- rng->data_read == NULL)
+ (rng->data_read == NULL && rng->read == NULL))
goto out;
mutex_lock(&rng_mutex);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 915157fcff9..bdaef8e9402 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -16,6 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
@@ -23,78 +24,64 @@
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
-/* The host will fill any buffer we give it with sweet, sweet randomness. We
- * give it 64 bytes at a time, and the hwrng framework takes it 4 bytes at a
- * time. */
-#define RANDOM_DATA_SIZE 64
-
static struct virtqueue *vq;
-static u32 *random_data;
-static unsigned int data_left;
+static unsigned int data_avail;
static DECLARE_COMPLETION(have_data);
+static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
- unsigned int len;
-
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &len))
+ if (!vq->vq_ops->get_buf(vq, &data_avail))
return;
- data_left += len;
complete(&have_data);
}
-static void register_buffer(void)
+/* The host will fill any buffer we give it with sweet, sweet randomness. */
+static void register_buffer(u8 *buf, size_t size)
{
struct scatterlist sg;
- sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
+ sg_init_one(&sg, buf, size);
+
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) < 0)
+ if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
+
vq->vq_ops->kick(vq);
}
-/* At least we don't udelay() in a loop like some other drivers. */
-static int virtio_data_present(struct hwrng *rng, int wait)
+static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
- if (data_left >= sizeof(u32))
- return 1;
-again:
+ if (!busy) {
+ busy = true;
+ init_completion(&have_data);
+ register_buffer(buf, size);
+ }
+
if (!wait)
return 0;
wait_for_completion(&have_data);
- /* Not enough? Re-register. */
- if (unlikely(data_left < sizeof(u32))) {
- register_buffer();
- goto again;
- }
+ busy = false;
- return 1;
+ return data_avail;
}
-/* virtio_data_present() must have succeeded before this is called. */
-static int virtio_data_read(struct hwrng *rng, u32 *data)
+static void virtio_cleanup(struct hwrng *rng)
{
- BUG_ON(data_left < sizeof(u32));
- data_left -= sizeof(u32);
- *data = random_data[data_left / 4];
-
- if (data_left < sizeof(u32)) {
- init_completion(&have_data);
- register_buffer();
- }
- return sizeof(*data);
+ if (busy)
+ wait_for_completion(&have_data);
}
+
static struct hwrng virtio_hwrng = {
- .name = "virtio",
- .data_present = virtio_data_present,
- .data_read = virtio_data_read,
+ .name = "virtio",
+ .cleanup = virtio_cleanup,
+ .read = virtio_read,
};
static int virtrng_probe(struct virtio_device *vdev)
@@ -112,7 +99,6 @@ static int virtrng_probe(struct virtio_device *vdev)
return err;
}
- register_buffer();
return 0;
}
@@ -138,21 +124,11 @@ static struct virtio_driver virtio_rng = {
static int __init init(void)
{
- int err;
-
- random_data = kmalloc(RANDOM_DATA_SIZE, GFP_KERNEL);
- if (!random_data)
- return -ENOMEM;
-
- err = register_virtio_driver(&virtio_rng);
- if (err)
- kfree(random_data);
- return err;
+ return register_virtio_driver(&virtio_rng);
}
static void __exit fini(void)
{
- kfree(random_data);
unregister_virtio_driver(&virtio_rng);
}
module_init(init);
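
Together with the hw_random core rework above, a driver can now provide a single read() callback that fills the buffer the core passes in, instead of the older data_present()/data_read() word-at-a-time pair; virtio-rng is converted to the new callback here. A minimal driver against the new interface might look like the sketch below (the "example" name and the constant fill are placeholders for illustration only; a real driver must return genuine entropy):

#include <linux/module.h>
#include <linux/hw_random.h>
#include <linux/string.h>

/* Fill the core-supplied buffer and report how many bytes were
 * produced, which is what rng_get_data() expects from ->read(). */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	memset(buf, 0x5a, max);		/* placeholder bytes, NOT random */
	return max;
}

static struct hwrng example_rng = {
	.name = "example",
	.read = example_rng_read,
};

static int __init example_rng_init(void)
{
	return hwrng_register(&example_rng);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_rng);
}

module_init(example_rng_init);
module_exit(example_rng_exit);
MODULE_LICENSE("GPL");
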
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a074fceb67d..ad82ec92ebd 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -26,7 +26,6 @@
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -892,29 +891,23 @@ static int memory_open(struct inode *inode, struct file *filp)
{
int minor;
const struct memdev *dev;
- int ret = -ENXIO;
-
- lock_kernel();
minor = iminor(inode);
if (minor >= ARRAY_SIZE(devlist))
- goto out;
+ return -ENXIO;
dev = &devlist[minor];
if (!dev->fops)
- goto out;
+ return -ENXIO;
filp->f_op = dev->fops;
if (dev->dev_info)
filp->f_mapping->backing_dev_info = dev->dev_info;
if (dev->fops->open)
- ret = dev->fops->open(inode, filp);
- else
- ret = 0;
-out:
- unlock_kernel();
- return ret;
+ return dev->fops->open(inode, filp);
+
+ return 0;
}
static const struct file_operations memory_fops = {
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 07fa612a58d..96f1cd086dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -49,7 +49,6 @@
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/kmod.h>
-#include <linux/smp_lock.h>
/*
* Head entry for the doubly linked miscdevice list
@@ -118,8 +117,7 @@ static int misc_open(struct inode * inode, struct file * file)
struct miscdevice *c;
int err = -ENODEV;
const struct file_operations *old_fops, *new_fops = NULL;
-
- lock_kernel();
+
mutex_lock(&misc_mtx);
list_for_each_entry(c, &misc_list, list) {
@@ -157,7 +155,6 @@ static int misc_open(struct inode * inode, struct file * file)
fops_put(old_fops);
fail:
mutex_unlock(&misc_mtx);
- unlock_kernel();
return err;
}
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 88cee4099be..4008e2ce73c 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -38,7 +38,6 @@
#define NVRAM_VERSION "1.3"
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/nvram.h>
#define PC 1
@@ -111,6 +110,7 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/smp_lock.h>
#include <asm/system.h>
@@ -214,7 +214,6 @@ void nvram_set_checksum(void)
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
- lock_kernel();
switch (origin) {
case 0:
/* nothing to do */
@@ -226,7 +225,7 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
offset += NVRAM_BYTES;
break;
}
- unlock_kernel();
+
return (offset >= 0) ? (file->f_pos = offset) : -EINVAL;
}
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 3f7da8cf3a8..8ecbcc174c1 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -20,7 +20,6 @@
#include <linux/mutex.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#define DEVNAME "pc8736x_gpio"
@@ -223,7 +222,6 @@ static int pc8736x_gpio_open(struct inode *inode, struct file *file)
unsigned m = iminor(inode);
file->private_data = &pc8736x_gpio_ops;
- cycle_kernel_lock();
dev_dbg(&pdev->dev, "open %d\n", m);
if (m >= PC8736X_GPIO_CT)
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 1d9100561c8..99e5272e3c5 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -52,7 +51,6 @@ static int scx200_gpio_open(struct inode *inode, struct file *file)
unsigned m = iminor(inode);
file->private_data = &scx200_gpio_ops;
- cycle_kernel_lock();
if (m >= MAX_PINS)
return -EINVAL;
return nonseekable_open(inode, file);
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index b3ec9b10e29..cad4eb65f13 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <asm/io.h>
#include <asm/reboot.h>
@@ -38,7 +37,7 @@ MODULE_PARM_DESC(major, "Major device number");
static void (*old_machine_restart)(char *command);
static void __iomem *tb0219_base;
-static spinlock_t tb0219_lock;
+static DEFINE_SPINLOCK(tb0219_lock);
#define tb0219_read(offset) readw(tb0219_base + (offset))
#define tb0219_write(offset, value) writew((value), tb0219_base + (offset))
@@ -237,7 +236,6 @@ static int tanbac_tb0219_open(struct inode *inode, struct file *file)
{
unsigned int minor;
- cycle_kernel_lock();
minor = iminor(inode);
switch (minor) {
case 0:
@@ -306,8 +304,6 @@ static int __devinit tb0219_probe(struct platform_device *dev)
return retval;
}
- spin_lock_init(&tb0219_lock);
-
old_machine_restart = _machine_restart;
_machine_restart = tb0219_restart;
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 40dbe54056c..73933a41ce8 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1821,7 +1821,6 @@ done:
static int ipath_open(struct inode *in, struct file *fp)
{
/* The real work is performed later in ipath_assign_port() */
- cycle_kernel_lock();
fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
return fp->private_data ? 0 : -ENOMEM;
}
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 887af79b7bf..076111fc72d 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -18,9 +18,9 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/sh_keysc.h>
#define KYCR1_OFFS 0x00
#define KYCR2_OFFS 0x04
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ea821b54696..ad730e15afc 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -35,7 +35,6 @@
#include <linux/hp_sdc.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -409,7 +408,6 @@ static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
static int hp_sdc_rtc_open(struct inode *inode, struct file *file)
{
- cycle_kernel_lock();
return 0;
}
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 6a822189325..a3d25da2f27 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -3,7 +3,6 @@
*/
#include <linux/types.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -26,6 +25,7 @@
static unsigned long anslcd_short_delay = 80;
static unsigned long anslcd_long_delay = 3280;
static volatile unsigned char __iomem *anslcd_ptr;
+static DEFINE_MUTEX(anslcd_mutex);
#undef DEBUG
@@ -65,26 +65,31 @@ anslcd_write( struct file * file, const char __user * buf,
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
+
+ mutex_lock(&anslcd_mutex);
for ( i = *ppos; count > 0; ++i, ++p, --count )
{
char c;
__get_user(c, p);
anslcd_write_byte_data( c );
}
+ mutex_unlock(&anslcd_mutex);
*ppos = i;
return p - buf;
}
-static int
-anslcd_ioctl( struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg )
+static long
+anslcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
char ch, __user *temp;
+ long ret = 0;
#ifdef DEBUG
printk(KERN_DEBUG "LCD: ioctl(%d,%d)\n",cmd,arg);
#endif
+ mutex_lock(&anslcd_mutex);
+
switch ( cmd )
{
case ANSLCD_CLEAR:
@@ -93,7 +98,7 @@ anslcd_ioctl( struct inode * inode, struct file * file,
anslcd_write_byte_ctrl ( 0x06 );
anslcd_write_byte_ctrl ( 0x01 );
anslcd_write_byte_ctrl ( 0x02 );
- return 0;
+ break;
case ANSLCD_SENDCTRL:
temp = (char __user *) arg;
__get_user(ch, temp);
@@ -101,33 +106,37 @@ anslcd_ioctl( struct inode * inode, struct file * file,
anslcd_write_byte_ctrl ( ch );
__get_user(ch, temp);
}
- return 0;
+ break;
case ANSLCD_SETSHORTDELAY:
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- anslcd_short_delay=arg;
- return 0;
+ ret = -EACCES;
+ else
+ anslcd_short_delay = arg;
+ break;
case ANSLCD_SETLONGDELAY:
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- anslcd_long_delay=arg;
- return 0;
+ ret = -EACCES;
+ else
+ anslcd_long_delay = arg;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ mutex_unlock(&anslcd_mutex);
+ return ret;
}
static int
anslcd_open( struct inode * inode, struct file * file )
{
- cycle_kernel_lock();
return 0;
}
const struct file_operations anslcd_fops = {
- .write = anslcd_write,
- .ioctl = anslcd_ioctl,
- .open = anslcd_open,
+ .write = anslcd_write,
+ .unlocked_ioctl = anslcd_ioctl,
+ .open = anslcd_open,
};
static struct miscdevice anslcd_dev = {
@@ -168,6 +177,7 @@ anslcd_init(void)
printk(KERN_DEBUG "LCD: init\n");
#endif
+ mutex_lock(&anslcd_mutex);
anslcd_write_byte_ctrl ( 0x38 );
anslcd_write_byte_ctrl ( 0x0c );
anslcd_write_byte_ctrl ( 0x06 );
@@ -176,6 +186,7 @@ anslcd_init(void)
for(a=0;a<80;a++) {
anslcd_write_byte_data(anslcd_logo[a]);
}
+ mutex_unlock(&anslcd_mutex);
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 08f2d07bf56..a296e717e86 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -35,6 +35,14 @@ config MFD_ASIC3
This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones)
+config MFD_SH_MOBILE_SDHI
+ bool "Support for SuperH Mobile SDHI"
+ depends on SUPERH
+ select MFD_CORE
+ ---help---
+ This driver supports the SDHI hardware block found in many
+ SuperH Mobile SoCs.
+
config MFD_DM355EVM_MSP
bool "DaVinci DM355 EVM microcontroller"
depends on I2C && MACH_DAVINCI_DM355_EVM
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index af0fc903cec..11350c1d930 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o
+obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
new file mode 100644
index 00000000000..03efae8041a
--- /dev/null
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -0,0 +1,156 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on "Compaq ASIC3 support":
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2004-2005 Phil Blundell
+ * Copyright 2007-2008 OpenedHand Ltd.
+ *
+ * Authors: Phil Blundell <pb@handhelds.org>,
+ * Samuel Ortiz <sameo@openedhand.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+
+struct sh_mobile_sdhi {
+ struct clk *clk;
+ struct tmio_mmc_data mmc_data;
+ struct mfd_cell cell_mmc;
+};
+
+static struct resource sh_mobile_sdhi_resources[] = {
+ {
+ .start = 0x000,
+ .end = 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell sh_mobile_sdhi_cell = {
+ .name = "tmio-mmc",
+ .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources),
+ .resources = sh_mobile_sdhi_resources,
+};
+
+static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
+{
+ struct platform_device *pdev = to_platform_device(tmio->dev.parent);
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+
+ if (p && p->set_pwr)
+ p->set_pwr(pdev, state);
+}
+
+static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
+{
+ struct sh_mobile_sdhi *priv;
+ struct resource *mem;
+ char clk_name[8];
+ int ret, irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ dev_err(&pdev->dev, "missing MEM resource\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ dev_err(&pdev->dev, "missing IRQ resource\n");
+
+ if (!mem || (irq < 0))
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
+ priv->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(priv->clk);
+ kfree(priv);
+ return ret;
+ }
+
+ clk_enable(priv->clk);
+
+ /* FIXME: silly const unsigned int hclk */
+ *(unsigned int *)&priv->mmc_data.hclk = clk_get_rate(priv->clk);
+ priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr;
+
+ memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
+ priv->cell_mmc.driver_data = &priv->mmc_data;
+ priv->cell_mmc.platform_data = &priv->cell_mmc;
+ priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
+ &priv->cell_mmc, 1, mem, irq);
+ if (ret) {
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int sh_mobile_sdhi_remove(struct platform_device *pdev)
+{
+ struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver sh_mobile_sdhi_driver = {
+ .driver = {
+ .name = "sh_mobile_sdhi",
+ .owner = THIS_MODULE,
+ },
+ .probe = sh_mobile_sdhi_probe,
+ .remove = __devexit_p(sh_mobile_sdhi_remove),
+};
+
+static int __init sh_mobile_sdhi_init(void)
+{
+ return platform_driver_register(&sh_mobile_sdhi_driver);
+}
+
+static void __exit sh_mobile_sdhi_exit(void)
+{
+ platform_driver_unregister(&sh_mobile_sdhi_driver);
+}
+
+module_init(sh_mobile_sdhi_init);
+module_exit(sh_mobile_sdhi_exit);
+
+MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
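The probe code above expects a MEM resource covering the register block, an IRQ, a clock named "sdhi<id>" and, optionally, platform data of type struct sh_mobile_sdhi_info providing a set_pwr() callback. A hedged sketch of how a board file might wire that up follows; the base address, IRQ number and the power hook are placeholders, not values taken from this patch.

	static void example_sdhi0_set_pwr(struct platform_device *pdev, int state)
	{
		/* toggle a board specific regulator or GPIO here */
	}

	static struct sh_mobile_sdhi_info example_sdhi0_info = {
		.set_pwr	= example_sdhi0_set_pwr,
	};

	static struct resource example_sdhi0_resources[] = {
		{
			.start	= 0x04ce0000,	/* placeholder base address */
			.end	= 0x04ce01ff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 100,		/* placeholder IRQ number */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_sdhi0_device = {
		.name		= "sh_mobile_sdhi",
		.id		= 0,		/* selects clock "sdhi0" */
		.num_resources	= ARRAY_SIZE(example_sdhi0_resources),
		.resource	= example_sdhi0_resources,
		.dev		= {
			.platform_data	= &example_sdhi0_info,
		},
	};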
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 432ae8358c8..e04b751680d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -329,7 +329,7 @@ config MMC_SDRICOH_CS
config MMC_TMIO
tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO || MFD_ASIC3
+ depends on MFD_TMIO || MFD_ASIC3 || SUPERH
help
This provides support for the SD/MMC cell found in TC6393XB,
T7L66XB and also HTC ASIC3
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 8c0b26e9b98..cce00ed81f3 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -75,17 +75,8 @@ static ssize_t eisa_eeprom_read(struct file * file,
return ret;
}
-static int eisa_eeprom_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
static int eisa_eeprom_open(struct inode *inode, struct file *file)
{
- cycle_kernel_lock();
-
if (file->f_mode & FMODE_WRITE)
return -EINVAL;
@@ -104,7 +95,6 @@ static const struct file_operations eisa_eeprom_fops = {
.owner = THIS_MODULE,
.llseek = eisa_eeprom_llseek,
.read = eisa_eeprom_read,
- .ioctl = eisa_eeprom_ioctl,
.open = eisa_eeprom_open,
.release = eisa_eeprom_release,
};
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 0ed78a764de..3b3658669be 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -2,6 +2,7 @@
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
@@ -14,7 +15,8 @@
#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
-static int ir_ioapic_num;
+static struct hpet_scope ir_hpet[MAX_HPET_TBS];
+static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;
static int disable_intremap;
@@ -343,6 +345,16 @@ int flush_irte(int irq)
return rc;
}
+struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPET_TBS; i++)
+ if (ir_hpet[i].id == hpet_id)
+ return ir_hpet[i].iommu;
+ return NULL;
+}
+
struct intel_iommu *map_ioapic_to_ir(int apic)
{
int i;
@@ -470,6 +482,36 @@ int set_ioapic_sid(struct irte *irte, int apic)
return 0;
}
+int set_hpet_sid(struct irte *irte, u8 id)
+{
+ int i;
+ u16 sid = 0;
+
+ if (!irte)
+ return -1;
+
+ for (i = 0; i < MAX_HPET_TBS; i++) {
+ if (ir_hpet[i].id == id) {
+ sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
+ break;
+ }
+ }
+
+ if (sid == 0) {
+ pr_warning("Failed to set source-id of HPET block (%d)\n", id);
+ return -1;
+ }
+
+ /*
+ * Should really use SQ_ALL_16. Some platforms are broken.
+ * While we figure out the right quirks for these broken platforms, use
+ * SQ_13_IGNORE_3 for now.
+ */
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
+
+ return 0;
+}
+
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
struct pci_dev *bridge;
@@ -711,6 +753,34 @@ error:
return -1;
}
+static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu)
+{
+ struct acpi_dmar_pci_path *path;
+ u8 bus;
+ int count;
+
+ bus = scope->bus;
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+ / sizeof(struct acpi_dmar_pci_path);
+
+ while (--count > 0) {
+ /*
+ * Access PCI directly because the PCI
+ * subsystem isn't initialized yet.
+ */
+ bus = read_pci_config_byte(bus, path->dev, path->fn,
+ PCI_SECONDARY_BUS);
+ path++;
+ }
+ ir_hpet[ir_hpet_num].bus = bus;
+ ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_hpet[ir_hpet_num].iommu = iommu;
+ ir_hpet[ir_hpet_num].id = scope->enumeration_id;
+ ir_hpet_num++;
+}
+
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu)
{
@@ -740,8 +810,8 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
ir_ioapic_num++;
}
-static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
- struct intel_iommu *iommu)
+static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
+ struct intel_iommu *iommu)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
@@ -765,6 +835,17 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
drhd->address);
ir_parse_one_ioapic_scope(scope, iommu);
+ } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
+ if (ir_hpet_num == MAX_HPET_TBS) {
+ printk(KERN_WARNING "Exceeded Max HPET blocks\n");
+ return -1;
+ }
+
+ printk(KERN_INFO "HPET id %d under DRHD base"
+ " 0x%Lx\n", scope->enumeration_id,
+ drhd->address);
+
+ ir_parse_one_hpet_scope(scope, iommu);
}
start += scope->length;
}
@@ -785,7 +866,7 @@ int __init parse_ioapics_under_ir(void)
struct intel_iommu *iommu = drhd->iommu;
if (ecap_ir_support(iommu->ecap)) {
- if (ir_parse_ioapic_scope(drhd->hdr, iommu))
+ if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index 63a263c1841..5662fecfee6 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -7,4 +7,11 @@ struct ioapic_scope {
unsigned int devfn; /* PCI devfn number */
};
+struct hpet_scope {
+ struct intel_iommu *iommu;
+ u8 id;
+ unsigned int bus;
+ unsigned int devfn;
+};
+
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
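The new hpet_scope table above is looked up through map_hpet_to_ir() and stamped into an IRTE with set_hpet_sid(). A hedged sketch of how HPET MSI setup code might use the two helpers; everything except those two functions is illustrative.

	/* Illustrative only: vector/destination programming is omitted. */
	static int example_setup_hpet_irte(u8 hpet_id, struct irte *irte)
	{
		struct intel_iommu *iommu = map_hpet_to_ir(hpet_id);

		if (!iommu)
			return -ENODEV;	/* HPET block not covered by any DRHD scope */

		/* record the HPET's bus/devfn as source-id for SID verification */
		if (set_hpet_sid(irte, hpet_id))
			return -EINVAL;

		return 0;
	}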
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index d490628b64d..1e73c8f42e3 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -201,7 +201,7 @@ static struct platform_driver ds1302_platform_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
- .remove = __exit_p(ds1302_rtc_remove),
+ .remove = __devexit_p(ds1302_rtc_remove),
};
static int __init ds1302_rtc_init(void)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aaccc8ecfa8..fdb2e7c1450 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,7 +24,6 @@
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
-#include <asm/todclk.h>
#include <asm/itcw.h>
/* This is ugly... */
@@ -64,6 +63,7 @@ static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
/*
* SECTION: Operations on the device structure.
@@ -960,7 +960,7 @@ static void dasd_device_timeout(unsigned long ptr)
device = (struct dasd_device *) ptr;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
- device->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_device_bh(device);
}
@@ -994,10 +994,9 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
return;
cqr = (struct dasd_ccw_req *) intparm;
if (cqr->status != DASD_CQR_IN_IO) {
- DBF_EVENT(DBF_DEBUG,
- "invalid status in handle_killed_request: "
- "bus_id %s, status %02x",
- dev_name(&cdev->dev), cqr->status);
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+ "invalid status in handle_killed_request: "
+ "%02x", cqr->status);
return;
}
@@ -1023,7 +1022,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
/* First of all start sense subsystem status request. */
dasd_eer_snss(device);
- device->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
@@ -1045,12 +1044,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
case -EIO:
break;
case -ETIMEDOUT:
- DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
- __func__, dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "request timed out\n", __func__);
break;
default:
- DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
- __func__, dev_name(&cdev->dev), PTR_ERR(irb));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "unknown error %ld\n", __func__,
+ PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
return;
@@ -1405,6 +1405,20 @@ void dasd_schedule_device_bh(struct dasd_device *device)
tasklet_hi_schedule(&device->tasklet);
}
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped &= ~bits;
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
/*
* Queue a request to the head of the device ccw_queue.
* Start the I/O if possible.
@@ -1465,58 +1479,135 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
}
/*
- * Queue a request to the tail of the device ccw_queue and wait for
- * it's completion.
+ * Checks whether error recovery is necessary and returns 1 if so, 0 otherwise.
*/
-int dasd_sleep_on(struct dasd_ccw_req *cqr)
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
- int rc;
+ dasd_erp_fn_t erp_fn;
+ if (cqr->status == DASD_CQR_FILLED)
+ return 0;
device = cqr->startdev;
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->status == DASD_CQR_TERMINATED) {
+ device->discipline->handle_terminated_request(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_NEED_ERP) {
+ erp_fn = device->discipline->erp_action(cqr);
+ erp_fn(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_FAILED)
+ dasd_log_sense(cqr, &cqr->irb);
+ if (cqr->refers) {
+ __dasd_process_erp(device, cqr);
+ return 1;
+ }
+ }
+ return 0;
+}
- cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
- dasd_add_request_tail(cqr);
- wait_event(generic_waitq, _wait_for_wakeup(cqr));
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->refers) /* erp is not done yet */
+ return 1;
+ return ((cqr->status != DASD_CQR_DONE) &&
+ (cqr->status != DASD_CQR_FAILED));
+ } else
+ return (cqr->status == DASD_CQR_FILLED);
+}
- if (cqr->status == DASD_CQR_DONE)
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+ struct dasd_device *device;
+ int rc;
+ struct list_head ccw_queue;
+ struct dasd_ccw_req *cqr;
+
+ INIT_LIST_HEAD(&ccw_queue);
+ maincqr->status = DASD_CQR_FILLED;
+ device = maincqr->startdev;
+ list_add(&maincqr->blocklist, &ccw_queue);
+ for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
+ cqr = list_first_entry(&ccw_queue,
+ struct dasd_ccw_req, blocklist)) {
+
+ if (__dasd_sleep_on_erp(cqr))
+ continue;
+ if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+ continue;
+
+ /* Non-temporary stop condition will trigger fail fast */
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ (!dasd_eer_enabled(device))) {
+ cqr->status = DASD_CQR_FAILED;
+ continue;
+ }
+
+ /* Don't try to start requests if device is stopped */
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !(device->stopped));
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+
+ cqr->callback = dasd_wakeup_cb;
+ cqr->callback_data = (void *) &generic_waitq;
+ dasd_add_request_tail(cqr);
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, _wait_for_wakeup(cqr));
+ if (rc == -ERESTARTSYS) {
+ dasd_cancel_req(cqr);
+ /* wait (non-interruptible) for final status */
+ wait_event(generic_waitq,
+ _wait_for_wakeup(cqr));
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
+ }
+
+ maincqr->endclk = get_clock();
+ if ((maincqr->status != DASD_CQR_DONE) &&
+ (maincqr->intrc != -ERESTARTSYS))
+ dasd_log_sense(maincqr, &maincqr->irb);
+ if (maincqr->status == DASD_CQR_DONE)
rc = 0;
- else if (cqr->intrc)
- rc = cqr->intrc;
+ else if (maincqr->intrc)
+ rc = maincqr->intrc;
else
rc = -EIO;
return rc;
}
/*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+ return _dasd_sleep_on(cqr, 0);
+}
+
+/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptibly for its completion.
*/
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
- struct dasd_device *device;
- int rc;
-
- device = cqr->startdev;
- cqr->callback = dasd_wakeup_cb;
- cqr->callback_data = (void *) &generic_waitq;
- dasd_add_request_tail(cqr);
- rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
- if (rc == -ERESTARTSYS) {
- dasd_cancel_req(cqr);
- /* wait (non-interruptible) for final status */
- wait_event(generic_waitq, _wait_for_wakeup(cqr));
- cqr->intrc = rc;
- }
-
- if (cqr->status == DASD_CQR_DONE)
- rc = 0;
- else if (cqr->intrc)
- rc = cqr->intrc;
- else
- rc = -EIO;
- return rc;
+ return _dasd_sleep_on(cqr, 1);
}
/*
@@ -1630,7 +1721,7 @@ static void dasd_block_timeout(unsigned long ptr)
block = (struct dasd_block *) ptr;
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
- block->base->stopped &= ~DASD_STOPPED_PENDING;
+ dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
}
@@ -1657,11 +1748,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
/*
* Process finished error recovery ccw.
*/
-static inline void __dasd_block_process_erp(struct dasd_block *block,
- struct dasd_ccw_req *cqr)
+static void __dasd_process_erp(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
{
dasd_erp_fn_t erp_fn;
- struct dasd_device *device = block->base;
if (cqr->status == DASD_CQR_DONE)
DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
@@ -1725,9 +1815,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
if (!list_empty(&block->ccw_queue))
break;
- spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
- basedev->stopped |= DASD_STOPPED_PENDING;
- spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+ spin_lock_irqsave(
+ get_ccwdev_lock(basedev->cdev), flags);
+ dasd_device_set_stop_bits(basedev,
+ DASD_STOPPED_PENDING);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(basedev->cdev), flags);
dasd_block_set_timer(block, HZ/2);
break;
}
@@ -1813,7 +1906,7 @@ restart:
cqr->status = DASD_CQR_FILLED;
cqr->retries = 255;
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped |= DASD_STOPPED_QUIESCE;
+ dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
flags);
goto restart;
@@ -1821,7 +1914,7 @@ restart:
/* Process finished ERP request. */
if (cqr->refers) {
- __dasd_block_process_erp(block, cqr);
+ __dasd_process_erp(base, cqr);
goto restart;
}
@@ -1952,7 +2045,7 @@ restart_cb:
/* Process finished ERP request. */
if (cqr->refers) {
spin_lock_bh(&block->queue_lock);
- __dasd_block_process_erp(block, cqr);
+ __dasd_process_erp(block->base, cqr);
spin_unlock_bh(&block->queue_lock);
/* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */
@@ -2208,18 +2301,11 @@ int dasd_generic_probe(struct ccw_device *cdev,
{
int ret;
- ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
- if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_generic_probe: could not set ccw-device options "
- "for %s\n", dev_name(&cdev->dev));
- return ret;
- }
ret = dasd_add_sysfs_files(cdev);
if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_generic_probe: could not add sysfs entries "
- "for %s\n", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+ "dasd_generic_probe: could not add "
+ "sysfs entries");
return ret;
}
cdev->handler = &dasd_int_handler;
@@ -2418,16 +2504,16 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
- device->stopped |= DASD_STOPPED_DC_WAIT;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
ret = 1;
break;
case CIO_OPER:
/* FIXME: add a sanity check. */
- device->stopped &= ~DASD_STOPPED_DC_WAIT;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
if (device->stopped & DASD_UNRESUMED_PM) {
- device->stopped &= ~DASD_UNRESUMED_PM;
+ dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
dasd_restore_device(device);
ret = 1;
break;
@@ -2452,7 +2538,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
/* disallow new I/O */
- device->stopped |= DASD_STOPPED_PM;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
/* clear active requests */
INIT_LIST_HEAD(&freeze_queue);
spin_lock_irq(get_ccwdev_lock(cdev));
@@ -2504,14 +2590,18 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
return PTR_ERR(device);
/* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
- device->stopped &= ~DASD_UNRESUMED_PM;
+ dasd_device_remove_stop_bits(device,
+ (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
dasd_schedule_device_bh(device);
- if (device->discipline->restore)
+ /*
+ * call the discipline restore function;
+ * if the device is stopped do nothing, e.g. for disconnected devices
+ */
+ if (device->discipline->restore && !(device->stopped))
rc = device->discipline->restore(device);
- if (rc)
+ if (rc || device->stopped)
/*
* if the resume failed for the DASD we put it in
* an UNRESUMED stop state
@@ -2561,8 +2651,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
cqr->startdev = device;
cqr->memdev = device;
cqr->expires = 10*HZ;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 2;
+ cqr->retries = 256;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e8ff7b0c961..44796ba4eb9 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -12,7 +12,6 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/idals.h>
-#include <asm/todclk.h>
#define PRINTK_HEADER "dasd_erp(3990): "
@@ -70,8 +69,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
* processing until the started timer has expired or a related
* interrupt was received.
*/
-static void
-dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
{
struct dasd_device *device = erp->startdev;
@@ -81,10 +79,13 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
"blocking request queue for %is", expires/HZ);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped |= DASD_STOPPED_PENDING;
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
erp->status = DASD_CQR_FILLED;
- dasd_block_set_timer(device->block, expires);
+ if (erp->block)
+ dasd_block_set_timer(erp->block, expires);
+ else
+ dasd_device_set_timer(device, expires);
}
/*
@@ -243,9 +244,13 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
* DESCRIPTION
* Setup ERP to do the ERP action 1 (see Reference manual).
* Repeat the operation on a different channel path.
- * If all alternate paths have been tried, the request is posted with a
- * permanent error.
- * Note: duplex handling is not implemented (yet).
+ * As a deviation from the recommended recovery action, we reset the path mask
+ * after we have tried each path and go through all paths a second time.
+ * This covers situations where only one path is actually down at a time,
+ * but all paths fail and recover with the same sequence and timing as
+ * we try to use them (flapping links).
+ * If all alternate paths have been tried twice, the request is posted with
+ * a permanent error.
*
* PARAMETER
* erp pointer to the current ERP
@@ -254,17 +259,25 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
* erp pointer to the ERP
*
*/
-static struct dasd_ccw_req *
-dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
{
+ erp->function = dasd_3990_erp_action_1_sec;
+ dasd_3990_erp_alternate_path(erp);
+ return erp;
+}
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
erp->function = dasd_3990_erp_action_1;
-
dasd_3990_erp_alternate_path(erp);
-
+ if (erp->status == DASD_CQR_FAILED) {
+ erp->status = DASD_CQR_FILLED;
+ erp->retries = 10;
+ erp->lpm = LPM_ANYPATH;
+ erp->function = dasd_3990_erp_action_1_sec;
+ }
return erp;
-
-} /* end dasd_3990_erp_action_1 */
+} /* end dasd_3990_erp_action_1(b) */
/*
* DASD_3990_ERP_ACTION_4
@@ -2295,6 +2308,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
return cqr;
}
+ ccw = cqr->cpaddr;
if (cqr->cpmode == 1) {
/* make a shallow copy of the original tcw but set new tsb */
erp->cpmode = 1;
@@ -2303,6 +2317,9 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
tcw->tsb = (long)tsb;
+ } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+ /* PSF cannot be chained from NOOP/TIC */
+ erp->cpaddr = cqr->cpaddr;
} else {
/* initialize request with default TIC to current ERP/CQR */
ccw = erp->cpaddr;
@@ -2487,6 +2504,8 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
erp = dasd_3990_erp_action_1(erp);
+ } else if (erp->function == dasd_3990_erp_action_1_sec) {
+ erp = dasd_3990_erp_action_1_sec(erp);
} else if (erp->function == dasd_3990_erp_action_5) {
/* retries have not been successful */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 70a008c0052..fd1231738ef 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -152,6 +152,7 @@ static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
spin_lock_init(&lcu->lock);
+ init_completion(&lcu->lcu_setup);
return lcu;
out_err4:
@@ -240,6 +241,67 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
/*
+ * The first device to be registered on an LCU will have to do
+ * some additional setup steps to configure that LCU on the
+ * storage server. All further devices should wait with their
+ * initialization until the first device is done.
+ * To synchronize this work, the first device will call
+ * dasd_alias_lcu_setup_complete when it is done, and all
+ * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
+ */
+void dasd_alias_lcu_setup_complete(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+ struct alias_server *server;
+ struct alias_lcu *lcu;
+ struct dasd_uid *uid;
+
+ private = (struct dasd_eckd_private *) device->private;
+ uid = &private->uid;
+ lcu = NULL;
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(uid);
+ if (server)
+ lcu = _find_lcu(server, uid);
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ if (!lcu) {
+ DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+ "could not find lcu for %04x %02x",
+ uid->ssid, uid->real_unit_addr);
+ WARN_ON(1);
+ return;
+ }
+ complete_all(&lcu->lcu_setup);
+}
+
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+ struct alias_server *server;
+ struct alias_lcu *lcu;
+ struct dasd_uid *uid;
+
+ private = (struct dasd_eckd_private *) device->private;
+ uid = &private->uid;
+ lcu = NULL;
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(uid);
+ if (server)
+ lcu = _find_lcu(server, uid);
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ if (!lcu) {
+ DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+ "could not find lcu for %04x %02x",
+ uid->ssid, uid->real_unit_addr);
+ WARN_ON(1);
+ return;
+ }
+ wait_for_completion(&lcu->lcu_setup);
+}
+
+/*
* This function removes a device from the scope of alias management.
* The complicated part is to make sure that it is not in use by
* any of the workers. If necessary cancel the work.
@@ -755,11 +817,11 @@ static void __stop_device_on_lcu(struct dasd_device *device,
{
/* If pos == device then device is already locked! */
if (pos == device) {
- pos->stopped |= DASD_STOPPED_SU;
+ dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
- pos->stopped |= DASD_STOPPED_SU;
+ dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(pos->cdev));
}
@@ -793,26 +855,26 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~DASD_STOPPED_SU;
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
@@ -836,7 +898,8 @@ static void summary_unit_check_handling_work(struct work_struct *work)
/* 2. reset summary unit check */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
+ dasd_device_remove_stop_bits(device,
+ (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
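To summarize the first-comer protocol described in the dasd_alias_lcu_setup_complete() comment above, the intended call order on the device initialization path is roughly the following; this condensed sketch mirrors the dasd_eckd.c hunks further down, with the server validation and configuration re-read omitted.

	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0)
		return is_known;
	if (!is_known) {
		/* first device on this LCU: perform the one-time setup ... */
		dasd_alias_lcu_setup_complete(device);		/* complete_all() */
	} else {
		/* later devices block until the first one has finished */
		dasd_alias_wait_for_lcu_setup(device);		/* wait_for_completion() */
	}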
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 4e49b4a6c88..f64d0db881b 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,6 @@
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/s390_ext.h>
-#include <asm/todclk.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
@@ -145,6 +144,15 @@ dasd_diag_erp(struct dasd_device *device)
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+ if (rc == 4) {
+ if (!(device->features & DASD_FEATURE_READONLY)) {
+ dev_warn(&device->cdev->dev,
+ "The access mode of a DIAG device changed"
+ " to read-only");
+ device->features |= DASD_FEATURE_READONLY;
+ }
+ rc = 0;
+ }
if (rc)
dev_warn(&device->cdev->dev, "DIAG ERP failed with "
"rc=%d\n", rc);
@@ -433,16 +441,20 @@ dasd_diag_check_device(struct dasd_device *device)
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
- if (rc) {
+ if (rc && (rc != 4)) {
dev_warn(&device->cdev->dev, "DIAG initialization "
"failed with rc=%d\n", rc);
rc = -EIO;
} else {
+ if (rc == 4)
+ device->features |= DASD_FEATURE_READONLY;
dev_info(&device->cdev->dev,
- "New DASD with %ld byte/block, total size %ld KB\n",
+ "New DASD with %ld byte/block, total size %ld KB%s\n",
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
- block->s2b_shift) >> 1);
+ block->s2b_shift) >> 1,
+ (rc == 4) ? ", read-only device" : "");
+ rc = 0;
}
out_label:
free_page((long) label);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 417b97cd3f9..5819dc02a14 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -24,7 +24,6 @@
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
@@ -78,6 +77,11 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
@@ -86,11 +90,12 @@ dasd_eckd_probe (struct ccw_device *cdev)
int ret;
/* set ECKD specific ccw-device options */
- ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+ CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
if (ret) {
- DBF_EVENT(DBF_WARNING,
- "dasd_eckd_probe: could not set ccw-device options "
- "for %s\n", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+ "dasd_eckd_probe: could not set "
+ "ccw-device options");
return ret;
}
ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -749,8 +754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
cqr->block = NULL;
cqr->expires = 10*HZ;
cqr->lpm = lpm;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 2;
+ cqr->retries = 256;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
@@ -885,16 +889,15 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT(DBF_WARNING,
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data returned "
- "error %d for device: %s", rc,
- dev_name(&device->cdev->dev));
+ "error %d", rc);
return rc;
}
if (conf_data == NULL) {
- DBF_EVENT(DBF_WARNING, "No configuration "
- "data retrieved for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
continue; /* no error */
}
/* save first valid configuration data */
@@ -941,16 +944,14 @@ static int dasd_eckd_read_features(struct dasd_device *device)
sizeof(struct dasd_rssd_features)),
device);
if (IS_ERR(cqr)) {
- DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
- "request for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+ "allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- cqr->retries = 5;
+ cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
@@ -1012,9 +1013,9 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
- psf_ssc_data->suborder = 0x40;
+ psf_ssc_data->suborder = 0xc0;
if (enable_pav) {
- psf_ssc_data->suborder |= 0x88;
+ psf_ssc_data->suborder |= 0x08;
psf_ssc_data->reserved[0] = 0x88;
}
ccw = cqr->cpaddr;
@@ -1025,6 +1026,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
+ cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1057,7 +1059,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
/*
* Validate the storage server of the current device.
*/
-static int dasd_eckd_validate_server(struct dasd_device *device)
+static void dasd_eckd_validate_server(struct dasd_device *device)
{
int rc;
struct dasd_eckd_private *private;
@@ -1068,15 +1070,12 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
else
enable_pav = 1;
rc = dasd_eckd_psf_ssc(device, enable_pav);
+
/* maybe the requested feature is not available on the server,
* therefore just report the error and go ahead */
private = (struct dasd_eckd_private *) device->private;
- DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
- "returned rc=%d for device: %s",
- private->uid.vendor, private->uid.serial,
- private->uid.ssid, rc, dev_name(&device->cdev->dev));
- /* RE-Read Configuration Data */
- return dasd_eckd_read_conf(device);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+ "returned rc=%d", private->uid.ssid, rc);
}
/*
@@ -1090,6 +1089,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
struct dasd_block *block;
int is_known, rc;
+ if (!ccw_device_is_pathgroup(device->cdev)) {
+ dev_warn(&device->cdev->dev,
+ "A channel path group could not be established\n");
+ return -EIO;
+ }
+ if (!ccw_device_is_multipath(device->cdev)) {
+ dev_info(&device->cdev->dev,
+ "The DASD is not operating in multipath mode\n");
+ }
private = (struct dasd_eckd_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
@@ -1123,9 +1131,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (private->uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT(DBF_WARNING, "could not allocate dasd "
- "block structure for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "could not allocate dasd "
+ "block structure");
rc = PTR_ERR(block);
goto out_err1;
}
@@ -1139,12 +1147,21 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
rc = is_known;
goto out_err2;
}
+ /*
+ * dasd_eckd_validate_server is done on the first device that
+ * is found for an LCU. All other devices have to wait
+ * for it, so they will read the correct feature codes.
+ */
if (!is_known) {
- /* new lcu found */
- rc = dasd_eckd_validate_server(device); /* will switch pav on */
- if (rc)
- goto out_err3;
- }
+ dasd_eckd_validate_server(device);
+ dasd_alias_lcu_setup_complete(device);
+ } else
+ dasd_alias_wait_for_lcu_setup(device);
+
+ /* device may report different configuration data after LCU setup */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err3;
/* Read Feature Codes */
dasd_eckd_read_features(device);
@@ -1153,9 +1170,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
- DBF_EVENT(DBF_WARNING,
- "Read device characteristics failed, rc=%d for "
- "device: %s", rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristic failed, rc=%d", rc);
goto out_err3;
}
/* find the valid cylinder size */
@@ -1256,12 +1272,29 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->block = NULL;
cqr->startdev = device;
cqr->memdev = device;
- cqr->retries = 0;
+ cqr->retries = 255;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+ char *sense;
+ if (init_cqr->status == DASD_CQR_DONE)
+ return INIT_CQR_OK;
+ else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+ init_cqr->status == DASD_CQR_FAILED) {
+ sense = dasd_get_sense(&init_cqr->irb);
+ if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+ return INIT_CQR_UNFORMATTED;
+ else
+ return INIT_CQR_ERROR;
+ } else
+ return INIT_CQR_ERROR;
+}
+
/*
* This is the callback function for the init_analysis cqr. It saves
* the status of the initial analysis ccw before it frees it and kicks
@@ -1269,21 +1302,20 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
* dasd_eckd_do_analysis again (if the device has not been marked
* for deletion in the meantime).
*/
-static void
-dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+ void *data)
{
struct dasd_eckd_private *private;
struct dasd_device *device;
device = init_cqr->startdev;
private = (struct dasd_eckd_private *) device->private;
- private->init_cqr_status = init_cqr->status;
+ private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
dasd_kick_device(device);
}
-static int
-dasd_eckd_start_analysis(struct dasd_block *block)
+static int dasd_eckd_start_analysis(struct dasd_block *block)
{
struct dasd_eckd_private *private;
struct dasd_ccw_req *init_cqr;
@@ -1295,27 +1327,44 @@ dasd_eckd_start_analysis(struct dasd_block *block)
init_cqr->callback = dasd_eckd_analysis_callback;
init_cqr->callback_data = NULL;
init_cqr->expires = 5*HZ;
+ /* first try without ERP, so we can later handle unformatted
+ * devices as a special case
+ */
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+ init_cqr->retries = 0;
dasd_add_request_head(init_cqr);
return -EAGAIN;
}
-static int
-dasd_eckd_end_analysis(struct dasd_block *block)
+static int dasd_eckd_end_analysis(struct dasd_block *block)
{
struct dasd_device *device;
struct dasd_eckd_private *private;
struct eckd_count *count_area;
unsigned int sb, blk_per_trk;
int status, i;
+ struct dasd_ccw_req *init_cqr;
device = block->base;
private = (struct dasd_eckd_private *) device->private;
status = private->init_cqr_status;
private->init_cqr_status = -1;
- if (status != DASD_CQR_DONE) {
- dev_warn(&device->cdev->dev,
- "The DASD is not formatted\n");
+ if (status == INIT_CQR_ERROR) {
+ /* try again, this time with full ERP */
+ init_cqr = dasd_eckd_analysis_ccw(device);
+ dasd_sleep_on(init_cqr);
+ status = dasd_eckd_analysis_evaluation(init_cqr);
+ dasd_sfree_request(init_cqr, device);
+ }
+
+ if (status == INIT_CQR_UNFORMATTED) {
+ dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
return -EMEDIUMTYPE;
+ } else if (status == INIT_CQR_ERROR) {
+ dev_err(&device->cdev->dev,
+ "Detecting the DASD disk layout failed because "
+ "of an I/O error\n");
+ return -EIO;
}
private->uses_cdl = 1;
@@ -1607,8 +1656,7 @@ dasd_eckd_format_device(struct dasd_device * device,
}
fcp->startdev = device;
fcp->memdev = device;
- clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
- fcp->retries = 5; /* set retry counter to enable default ERP */
+ fcp->retries = 256;
fcp->buildclk = get_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
@@ -2690,6 +2738,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 0;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
@@ -3240,11 +3289,15 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (is_known < 0)
return is_known;
if (!is_known) {
- /* new lcu found */
- rc = dasd_eckd_validate_server(device); /* will switch pav on */
- if (rc)
- goto out_err;
- }
+ dasd_eckd_validate_server(device);
+ dasd_alias_lcu_setup_complete(device);
+ } else
+ dasd_alias_wait_for_lcu_setup(device);
+
+ /* RE-Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
/* Read Feature Codes */
dasd_eckd_read_features(device);
@@ -3253,9 +3306,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&temp_rdc_data, 64);
if (rc) {
- DBF_EVENT(DBF_WARNING,
- "Read device characteristics failed, rc=%d for "
- "device: %s", rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristic failed, rc=%d", rc);
goto out_err;
}
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad45bcac3ce..864d53c0420 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -414,6 +414,7 @@ struct alias_lcu {
struct summary_unit_check_work_data suc_data;
struct read_uac_work_data ruac_data;
struct dasd_ccw_req *rsu_cqr;
+ struct completion lcu_setup;
};
struct alias_pav_group {
@@ -460,5 +461,6 @@ int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
-
+void dasd_alias_lcu_setup_complete(struct dasd_device *);
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d96039eae59..1f3e967aaba 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -536,7 +536,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
- lock_kernel();
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -544,7 +543,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
- unlock_kernel();
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -552,14 +550,12 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
- unlock_kernel();
return -ENOMEM;
}
if (dasd_eer_allocate_buffer_pages(eerb->buffer,
eerb->buffer_page_count)) {
kfree(eerb->buffer);
kfree(eerb);
- unlock_kernel();
return -ENOMEM;
}
filp->private_data = eerb;
@@ -567,7 +563,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
list_add(&eerb->list, &bufferlist);
spin_unlock_irqrestore(&bufferlock, flags);
- unlock_kernel();
return nonseekable_open(inp,filp);
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f245377e8e2..0f152444ac7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -20,7 +20,6 @@
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/todclk.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
@@ -141,9 +140,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
- DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
- "structure for device: %s",
- dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
+ "dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
@@ -155,9 +153,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
- DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
- "error %d for device: %s",
- rc, dev_name(&device->cdev->dev));
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
+ "characteristics returned error %d", rc);
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8afd9fa0087..e4c2143dabf 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -108,6 +108,16 @@ do { \
d_data); \
} while(0)
+#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
+do { \
+ struct ccw_dev_id __dev_id; \
+ ccw_device_get_id(d_cdev, &__dev_id); \
+ debug_sprintf_event(dasd_debug_area, \
+ d_level, \
+ "0.%x.%04x " d_str "\n", \
+ __dev_id.ssid, __dev_id.devno, d_data); \
+} while (0)
+
#define DBF_EXC(d_level, d_str, d_data...)\
do { \
debug_sprintf_exception(dasd_debug_area, \
@@ -595,6 +605,9 @@ int dasd_generic_restore_device(struct ccw_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
+void dasd_device_set_stop_bits(struct dasd_device *, int);
+void dasd_device_remove_stop_bits(struct dasd_device *, int);
+
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
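
As a rough illustration of the new DBF_EVENT_DEVID macro above: it looks up the ccw device id and prepends a "0.<ssid>.<devno>" bus-ID prefix to the debug message. The snippet below only mimics that prefix formatting in userspace; the struct and values are made-up stand-ins, not the kernel types.

	#include <stdio.h>

	/* Simplified stand-in for struct ccw_dev_id; illustrative only. */
	struct dev_id { unsigned int ssid; unsigned int devno; };

	int main(void)
	{
		struct dev_id id = { .ssid = 0, .devno = 0x4711 };

		/* Same "0.%x.%04x " prefix that DBF_EVENT_DEVID uses. */
		printf("0.%x.%04x %s\n", id.ssid, id.devno,
		       "could not allocate dasd block structure");
		return 0;
	}
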
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f756a1b0c57..478bcdb90b6 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -101,7 +101,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped |= DASD_STOPPED_QUIESCE;
+ dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
@@ -122,7 +122,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- base->stopped &= ~DASD_STOPPED_QUIESCE;
+ dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 21639d6c996..9d61683b563 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -857,7 +857,6 @@ static struct console con3215 = {
/*
* 3215 console initialization code called from console_init().
- * NOTE: This is called before kmalloc is available.
*/
static int __init con3215_init(void)
{
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb838bdf829..6bca81aea39 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -572,7 +572,6 @@ static struct console con3270 = {
/*
* 3270 console initialization code called from console_init().
- * NOTE: This is called before kmalloc is available.
*/
static int __init
con3270_init(void)
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 097d3846a82..d449063c30f 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -38,6 +38,8 @@ struct fs3270 {
size_t rdbuf_size; /* size of data returned by RDBUF */
};
+static DEFINE_MUTEX(fs3270_mutex);
+
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
@@ -328,7 +330,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fp)
return -ENODEV;
rc = 0;
- lock_kernel();
+ mutex_lock(&fs3270_mutex);
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
@@ -354,7 +356,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
rc = -EFAULT;
break;
}
- unlock_kernel();
+ mutex_unlock(&fs3270_mutex);
return rc;
}
@@ -437,7 +439,7 @@ fs3270_open(struct inode *inode, struct file *filp)
minor = tty->index + RAW3270_FIRSTMINOR;
tty_kref_put(tty);
}
- lock_kernel();
+ mutex_lock(&fs3270_mutex);
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
@@ -478,7 +480,7 @@ fs3270_open(struct inode *inode, struct file *filp)
}
filp->private_data = fp;
out:
- unlock_kernel();
+ mutex_unlock(&fs3270_mutex);
return rc;
}
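
The fs3270 hunks above follow the pattern used throughout this series: a driver-local mutex replaces lock_kernel()/unlock_kernel() around open and ioctl. A rough userspace sketch of the same pattern, using a pthread mutex and made-up function names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t fs3270_mutex = PTHREAD_MUTEX_INITIALIZER;

	static long do_ioctl(unsigned int cmd, unsigned long arg)
	{
		long rc = 0;

		pthread_mutex_lock(&fs3270_mutex);	/* was lock_kernel() */
		/* ... handle cmd/arg under the driver-local lock ... */
		printf("cmd=%u arg=%lu\n", cmd, arg);
		pthread_mutex_unlock(&fs3270_mutex);	/* was unlock_kernel() */
		return rc;
	}

	int main(void)
	{
		return (int) do_ioctl(1, 0);
	}
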
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 66e21dd2315..60473f86e1f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -283,7 +282,6 @@ static int mon_open(struct inode *inode, struct file *filp)
/*
* only one user allowed
*/
- lock_kernel();
rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
goto out;
@@ -321,7 +319,6 @@ static int mon_open(struct inode *inode, struct file *filp)
}
filp->private_data = monpriv;
dev_set_drvdata(monreader_device, monpriv);
- unlock_kernel();
return nonseekable_open(inode, filp);
out_path:
@@ -331,7 +328,6 @@ out_priv:
out_use:
clear_bit(MON_IN_USE, &mon_in_use);
out:
- unlock_kernel();
return rc;
}
@@ -607,6 +603,10 @@ static int __init mon_init(void)
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
rc = misc_register(&mon_dev);
if (rc < 0 )
goto out;
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 66fb8eba93f..6532ed8b4af 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -13,7 +13,6 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -185,13 +184,11 @@ static int monwrite_open(struct inode *inode, struct file *filp)
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
- lock_kernel();
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
- unlock_kernel();
return nonseekable_open(inode, filp);
}
@@ -364,6 +361,10 @@ static int __init mon_init(void)
goto out_driver;
}
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 5cc11c636d3..28b5afc129c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -84,6 +84,7 @@ static void __init sclp_read_info_early(void)
do {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
+ sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
rc = sclp_cmd_sync_early(commands[i], sccb);
} while (rc == -EBUSY);
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index a2633377470..7a242f07363 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -212,6 +212,9 @@ struct tape_device {
struct tape_class_device * nt;
struct tape_class_device * rt;
+ /* Device mutex to serialize tape commands. */
+ struct mutex mutex;
+
/* Device discipline information. */
struct tape_discipline * discipline;
void * discdata;
@@ -292,9 +295,9 @@ extern int tape_generic_pm_suspend(struct ccw_device *);
extern int tape_generic_probe(struct ccw_device *);
extern void tape_generic_remove(struct ccw_device *);
-extern struct tape_device *tape_get_device(int devindex);
-extern struct tape_device *tape_get_device_reference(struct tape_device *);
-extern struct tape_device *tape_put_device(struct tape_device *);
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2fe45ff77b7..3657fe103c2 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -113,16 +113,16 @@ tape_34xx_work_handler(struct work_struct *work)
{
struct tape_34xx_work *p =
container_of(work, struct tape_34xx_work, work);
+ struct tape_device *device = p->device;
switch(p->op) {
case TO_MSEN:
- tape_34xx_medium_sense(p->device);
+ tape_34xx_medium_sense(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
}
-
- p->device = tape_put_device(p->device);
+ tape_put_device(device);
kfree(p);
}
@@ -136,7 +136,7 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
INIT_WORK(&p->work, tape_34xx_work_handler);
- p->device = tape_get_device_reference(device);
+ p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e4cc3aae916..0c72aadb839 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -608,7 +608,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
INIT_WORK(&p->work, tape_3590_work_handler);
- p->device = tape_get_device_reference(device);
+ p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 0c0705b91c2..4799cc2f73c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -54,7 +54,7 @@ static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .locked_ioctl = tapeblock_ioctl,
+ .ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
@@ -239,7 +239,7 @@ tapeblock_setup_device(struct tape_device * device)
disk->major = tapeblock_major;
disk->first_minor = device->first_minor;
disk->fops = &tapeblock_fops;
- disk->private_data = tape_get_device_reference(device);
+ disk->private_data = tape_get_device(device);
disk->queue = blkdat->request_queue;
set_capacity(disk, 0);
sprintf(disk->disk_name, "btibm%d",
@@ -247,11 +247,11 @@ tapeblock_setup_device(struct tape_device * device)
blkdat->disk = disk;
blkdat->medium_changed = 1;
- blkdat->request_queue->queuedata = tape_get_device_reference(device);
+ blkdat->request_queue->queuedata = tape_get_device(device);
add_disk(disk);
- tape_get_device_reference(device);
+ tape_get_device(device);
INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
return 0;
@@ -274,13 +274,14 @@ tapeblock_cleanup_device(struct tape_device *device)
}
del_gendisk(device->blk_data.disk);
- device->blk_data.disk->private_data =
- tape_put_device(device->blk_data.disk->private_data);
+ device->blk_data.disk->private_data = NULL;
+ tape_put_device(device);
put_disk(device->blk_data.disk);
device->blk_data.disk = NULL;
cleanup_queue:
- device->blk_data.request_queue->queuedata = tape_put_device(device);
+ device->blk_data.request_queue->queuedata = NULL;
+ tape_put_device(device);
blk_cleanup_queue(device->blk_data.request_queue);
device->blk_data.request_queue = NULL;
@@ -363,7 +364,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
struct tape_device * device;
int rc;
- device = tape_get_device_reference(disk->private_data);
+ device = tape_get_device(disk->private_data);
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31566c55adf..23d773a0d11 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -33,8 +33,7 @@ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
-static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
static long tapechar_compat_ioctl(struct file *, unsigned int,
unsigned long);
@@ -43,7 +42,7 @@ static const struct file_operations tape_fops =
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
- .ioctl = tapechar_ioctl,
+ .unlocked_ioctl = tapechar_ioctl,
.compat_ioctl = tapechar_compat_ioctl,
.open = tapechar_open,
.release = tapechar_release,
@@ -170,7 +169,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
- filp->f_pos += rc;
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
@@ -238,7 +236,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
- filp->f_pos += block_size - request->rescnt;
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
@@ -286,26 +283,20 @@ tapechar_open (struct inode *inode, struct file *filp)
if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
return -ENODEV;
- lock_kernel();
minor = iminor(filp->f_path.dentry->d_inode);
- device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
+ device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
- DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
- rc = PTR_ERR(device);
- goto out;
+ DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+ return PTR_ERR(device);
}
-
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- rc = nonseekable_open(inode, filp);
- }
- else
+ nonseekable_open(inode, filp);
+ } else
tape_put_device(device);
-out:
- unlock_kernel();
return rc;
}
@@ -342,7 +333,8 @@ tapechar_release(struct inode *inode, struct file *filp)
device->char_data.idal_buf = NULL;
}
tape_release(device);
- filp->private_data = tape_put_device(device);
+ filp->private_data = NULL;
+ tape_put_device(device);
return 0;
}
@@ -351,16 +343,11 @@ tapechar_release(struct inode *inode, struct file *filp)
* Tape device io controls.
*/
static int
-tapechar_ioctl(struct inode *inp, struct file *filp,
- unsigned int no, unsigned long data)
+__tapechar_ioctl(struct tape_device *device,
+ unsigned int no, unsigned long data)
{
- struct tape_device *device;
int rc;
- DBF_EVENT(6, "TCHAR:ioct\n");
-
- device = (struct tape_device *) filp->private_data;
-
if (no == MTIOCTOP) {
struct mtop op;
@@ -453,15 +440,30 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
}
static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+ struct tape_device *device;
+ long rc;
+
+ DBF_EVENT(6, "TCHAR:ioct\n");
+
+ device = (struct tape_device *) filp->private_data;
+ mutex_lock(&device->mutex);
+ rc = __tapechar_ioctl(device, no, data);
+ mutex_unlock(&device->mutex);
+ return rc;
+}
+
+static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
if (device->discipline->ioctl_fn) {
- lock_kernel();
+ mutex_lock(&device->mutex);
rval = device->discipline->ioctl_fn(device, no, data);
- unlock_kernel();
+ mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5cd31e07164..f5d6802dc5d 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -492,6 +492,7 @@ tape_alloc_device(void)
kfree(device);
return ERR_PTR(-ENOMEM);
}
+ mutex_init(&device->mutex);
INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq);
@@ -511,11 +512,12 @@ tape_alloc_device(void)
* increment the reference count.
*/
struct tape_device *
-tape_get_device_reference(struct tape_device *device)
+tape_get_device(struct tape_device *device)
{
- DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
- atomic_inc_return(&device->ref_count));
+ int count;
+ count = atomic_inc_return(&device->ref_count);
+ DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
return device;
}
@@ -525,32 +527,25 @@ tape_get_device_reference(struct tape_device *device)
* The function returns a NULL pointer to be used by the caller
* for clearing reference pointers.
*/
-struct tape_device *
+void
tape_put_device(struct tape_device *device)
{
- int remain;
+ int count;
- remain = atomic_dec_return(&device->ref_count);
- if (remain > 0) {
- DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
- } else {
- if (remain < 0) {
- DBF_EVENT(4, "put device without reference\n");
- } else {
- DBF_EVENT(4, "tape_free_device(%p)\n", device);
- kfree(device->modeset_byte);
- kfree(device);
- }
+ count = atomic_dec_return(&device->ref_count);
+ DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
+ BUG_ON(count < 0);
+ if (count == 0) {
+ kfree(device->modeset_byte);
+ kfree(device);
}
-
- return NULL;
}
/*
* Find tape device by a device index.
*/
struct tape_device *
-tape_get_device(int devindex)
+tape_find_device(int devindex)
{
struct tape_device *device, *tmp;
@@ -558,7 +553,7 @@ tape_get_device(int devindex)
read_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
- device = tape_get_device_reference(tmp);
+ device = tape_get_device(tmp);
break;
}
}
@@ -579,7 +574,8 @@ tape_generic_probe(struct ccw_device *cdev)
device = tape_alloc_device();
if (IS_ERR(device))
return -ENODEV;
- ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+ ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+ CCWDEV_DO_MULTIPATH);
ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
if (ret) {
tape_put_device(device);
@@ -606,7 +602,8 @@ __tape_discard_requests(struct tape_device *device)
list_del(&request->list);
/* Decrease ref_count for removed request. */
- request->device = tape_put_device(device);
+ request->device = NULL;
+ tape_put_device(device);
request->rc = -EIO;
if (request->callback != NULL)
request->callback(request, request->callback_data);
@@ -664,9 +661,11 @@ tape_generic_remove(struct ccw_device *cdev)
tape_cleanup_device(device);
}
- if (!dev_get_drvdata(&cdev->dev)) {
+ device = dev_get_drvdata(&cdev->dev);
+ if (device) {
sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
- dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
+ dev_set_drvdata(&cdev->dev, NULL);
+ tape_put_device(device);
}
}
@@ -721,9 +720,8 @@ tape_free_request (struct tape_request * request)
{
DBF_LH(6, "Free request %p\n", request);
- if (request->device != NULL) {
- request->device = tape_put_device(request->device);
- }
+ if (request->device)
+ tape_put_device(request->device);
kfree(request->cpdata);
kfree(request->cpaddr);
kfree(request);
@@ -838,7 +836,8 @@ static void tape_long_busy_timeout(unsigned long data)
BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
- device->lb_timeout.data = (unsigned long) tape_put_device(device);
+ device->lb_timeout.data = 0UL;
+ tape_put_device(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
@@ -918,7 +917,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
}
/* Increase use count of device for the added request. */
- request->device = tape_get_device_reference(device);
+ request->device = tape_get_device(device);
if (list_empty(&device->req_queue)) {
/* No other requests are on the queue. Start this one. */
@@ -1117,8 +1116,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
if (del_timer(&device->lb_timeout)) {
- device->lb_timeout.data = (unsigned long)
- tape_put_device(device);
+ device->lb_timeout.data = 0UL;
+ tape_put_device(device);
__tape_start_next_request(device);
}
return;
@@ -1173,7 +1172,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break;
case TAPE_IO_LONG_BUSY:
device->lb_timeout.data =
- (unsigned long)tape_get_device_reference(device);
+ (unsigned long) tape_get_device(device);
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
@@ -1326,7 +1325,7 @@ EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
-EXPORT_SYMBOL(tape_get_device_reference);
+EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
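
The reference-counting rework above turns tape_get_device() into a plain "take a reference" helper and makes tape_put_device() free the device once the count drops to zero. A minimal userspace sketch of that pairing, with a plain int standing in for atomic_t, assert() standing in for BUG_ON(), and all names hypothetical:

	#include <assert.h>
	#include <stdlib.h>

	struct tape_device { int ref_count; };

	static struct tape_device *get_device(struct tape_device *d)
	{
		d->ref_count++;		/* tape_get_device(): only bump the count */
		return d;
	}

	static void put_device(struct tape_device *d)
	{
		int count = --d->ref_count;

		assert(count >= 0);	/* mirrors BUG_ON(count < 0) */
		if (count == 0)		/* last reference gone: free the device */
			free(d);
	}

	int main(void)
	{
		struct tape_device *d = malloc(sizeof(*d));

		if (!d)
			return 1;
		d->ref_count = 1;	/* reference held by the allocator */
		get_device(d);		/* e.g. a queued request takes a ref */
		put_device(d);		/* request completed */
		put_device(d);		/* drop the allocation ref: frees d */
		return 0;
	}
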
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 202f4213293..ebd820ccfb2 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -45,7 +45,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
seq_printf(m, "TapeNo\tBusID CuType/Model\t"
"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
}
- device = tape_get_device(n);
+ device = tape_find_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 38385677c65..911822db614 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/bootmem.h>
+#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -1731,6 +1732,22 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
return kbd_ioctl(tp->kbd, file, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static long
+tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct tty_operations tty3270_ops = {
.open = tty3270_open,
.close = tty3270_close,
@@ -1745,6 +1762,9 @@ static const struct tty_operations tty3270_ops = {
.hangup = tty3270_hangup,
.wait_until_sent = tty3270_wait_until_sent,
.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tty3270_compat_ioctl,
+#endif
.set_termios = tty3270_set_termios
};
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d1a142fa3eb..899aa795bf3 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -312,11 +312,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
return -ENOSYS;
	/* Be sure this device hasn't already been opened */
- lock_kernel();
spin_lock_bh(&logptr->priv_lock);
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
- unlock_kernel();
return -EBUSY;
}
logptr->dev_in_use = 1;
@@ -360,9 +358,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
|| (logptr->iucv_path_severed));
if (logptr->iucv_path_severed)
goto out_record;
- ret = nonseekable_open(inode, filp);
- unlock_kernel();
- return ret;
+ nonseekable_open(inode, filp);
+ return 0;
out_record:
if (logptr->autorecording)
@@ -372,7 +369,6 @@ out_path:
logptr->path = NULL;
out_dev:
logptr->dev_in_use = 0;
- unlock_kernel();
return -EIO;
}
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 77571b68539..cc56fc708ba 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -695,7 +695,6 @@ static int ur_open(struct inode *inode, struct file *file)
if (accmode == O_RDWR)
return -EACCES;
- lock_kernel();
/*
* We treat the minor number as the devno of the ur device
* to find in the driver tree.
@@ -749,7 +748,6 @@ static int ur_open(struct inode *inode, struct file *file)
goto fail_urfile_free;
urf->file_reclen = rc;
file->private_data = urf;
- unlock_kernel();
return 0;
fail_urfile_free:
@@ -761,7 +759,6 @@ fail_unlock:
fail_put:
urdev_put(urd);
out:
- unlock_kernel();
return rc;
}
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index f2bc287b69e..c974058e48d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/suspend.h>
#include <linux/watchdog.h>
-#include <linux/smp_lock.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -49,6 +48,8 @@ static unsigned int vmwdt_interval = 60;
static unsigned long vmwdt_is_open;
static int vmwdt_expect_close;
+static DEFINE_MUTEX(vmwdt_mutex);
+
#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
#define VMWDT_RUNNING 1 /* The watchdog is armed */
@@ -133,15 +134,11 @@ static int __init vmwdt_probe(void)
static int vmwdt_open(struct inode *i, struct file *f)
{
int ret;
- lock_kernel();
- if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
- unlock_kernel();
+ if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
return -EBUSY;
- }
ret = vmwdt_keepalive();
if (ret)
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
- unlock_kernel();
return ret ? ret : nonseekable_open(i, f);
}
@@ -160,8 +157,7 @@ static struct watchdog_info vmwdt_info = {
.identity = "z/VM Watchdog Timer",
};
-static int vmwdt_ioctl(struct inode *i, struct file *f,
- unsigned int cmd, unsigned long arg)
+static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -205,10 +201,19 @@ static int vmwdt_ioctl(struct inode *i, struct file *f,
case WDIOC_KEEPALIVE:
return vmwdt_keepalive();
}
-
return -EINVAL;
}
+static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int rc;
+
+ mutex_lock(&vmwdt_mutex);
+ rc = __vmwdt_ioctl(cmd, arg);
+ mutex_unlock(&vmwdt_mutex);
+ return (long) rc;
+}
+
static ssize_t vmwdt_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -288,7 +293,7 @@ static struct notifier_block vmwdt_power_notifier = {
static const struct file_operations vmwdt_fops = {
.open = &vmwdt_open,
.release = &vmwdt_close,
- .ioctl = &vmwdt_ioctl,
+ .unlocked_ioctl = &vmwdt_ioctl,
.write = &vmwdt_write,
.owner = THIS_MODULE,
};
@@ -309,6 +314,10 @@ static int __init vmwdt_init(void)
ret = register_pm_notifier(&vmwdt_power_notifier);
if (ret)
return ret;
+ /*
+ * misc_register() has to be the last action in module_init(), because
+ * file operations will be available right after this.
+ */
ret = misc_register(&vmwdt_dev);
if (ret) {
unregister_pm_notifier(&vmwdt_power_notifier);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65..d033414f759 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
- fcx.o itcw.o crw.o
+ fcx.o itcw.o crw.o ccwreq.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 00000000000..9509e386093
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+ while (lpm && ((lpm & mask) == 0))
+ lpm >>= 1;
+ return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ req->retries = req->maxretries;
+	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+
+ return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->done)
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ sch->lpm = sch->schib.pmcw.pam;
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw1 *cp = req->cp;
+ int rc = -EACCES;
+
+ while (req->mask) {
+ if (req->retries-- == 0) {
+ /* Retries exhausted, try next path. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Perform start function. */
+ sch->lpm = 0xff;
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+ ccw_device_set_timeout(cdev, req->timeout);
+ return;
+ }
+ if (rc == -ENODEV) {
+ /* Permanent device error. */
+ break;
+ }
+ if (rc == -EACCES) {
+			/* Permanent path error. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Temporary improper status. */
+ rc = cio_clear(sch);
+ if (rc)
+ break;
+ return;
+ }
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask, req->lpm);
+ req->drc = 0;
+ req->done = 0;
+ req->cancel = 0;
+ if (!req->mask)
+ goto out_nopath;
+ ccwreq_do(cdev);
+ return;
+
+out_nopath:
+ ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (req->done)
+ return 1;
+ req->cancel = 1;
+ rc = cio_clear(sch);
+ if (rc)
+ ccwreq_stop(cdev, rc);
+ return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+ struct irb *irb = &cdev->private->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+
+ /* Perform BASIC SENSE if needed. */
+ if (ccw_device_accumulate_and_sense(cdev, lcirb))
+ return IO_RUNNING;
+ /* Check for halt/clear interrupt. */
+ if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ return IO_KILLED;
+ /* Check for path error. */
+ if (scsw->cc == 3 || scsw->pno)
+ return IO_PATH_ERROR;
+ /* Handle BASIC SENSE data. */
+ if (irb->esw.esw0.erw.cons) {
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+ CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+ /* Assume that unexpected SENSE data implies an error. */
+ return IO_STATUS_ERROR;
+ }
+ /* Check for channel errors. */
+ if (scsw->cstat != 0)
+ return IO_STATUS_ERROR;
+ /* Check for device errors. */
+ if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return IO_STATUS_ERROR;
+ /* Check for final state. */
+ if (!(scsw->dstat & DEV_STAT_DEV_END))
+ return IO_RUNNING;
+ /* Check for other improper status. */
+ if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+ return IO_STATUS_ERROR;
+ return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct {
+ struct ccw_dev_id dev_id;
+ u16 retries;
+ u8 lpm;
+ u8 status;
+ } __attribute__ ((packed)) data;
+ data.dev_id = cdev->private->dev_id;
+ data.retries = req->retries;
+ data.lpm = (u8) req->mask;
+ data.status = (u8) status;
+ CIO_TRACE_EVENT(2, "reqstat");
+ CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct irb *irb = (struct irb *) __LC_IRB;
+ enum io_status status;
+ int rc = -EOPNOTSUPP;
+
+ /* Check status of I/O request. */
+ status = ccwreq_status(cdev, irb);
+ if (req->filter)
+ status = req->filter(cdev, req->data, irb, status);
+ if (status != IO_RUNNING)
+ ccw_device_set_timeout(cdev, 0);
+ if (status != IO_DONE && status != IO_RUNNING)
+ ccwreq_log_status(cdev, status);
+ switch (status) {
+ case IO_DONE:
+ break;
+ case IO_RUNNING:
+ return;
+ case IO_REJECTED:
+ goto err;
+ case IO_PATH_ERROR:
+ goto out_next_path;
+ case IO_STATUS_ERROR:
+ goto out_restart;
+ case IO_KILLED:
+ /* Check if request was cancelled on purpose. */
+ if (req->cancel) {
+ rc = -EIO;
+ goto err;
+ }
+ goto out_restart;
+ }
+ /* Check back with request initiator. */
+ if (!req->check)
+ goto out;
+ switch (req->check(cdev, req->data)) {
+ case 0:
+ break;
+ case -EAGAIN:
+ goto out_restart;
+ case -EACCES:
+ goto out_next_path;
+ default:
+ goto err;
+ }
+out:
+ ccwreq_stop(cdev, 0);
+ return;
+
+out_next_path:
+ /* Try next path and restart I/O. */
+ if (!ccwreq_next_path(cdev)) {
+ rc = -EACCES;
+ goto err;
+ }
+out_restart:
+ /* Restart. */
+ ccwreq_do(cdev);
+ return;
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (!ccwreq_next_path(cdev)) {
+ /* set the final return code for this request */
+ req->drc = -ETIME;
+ }
+ rc = cio_clear(sch);
+ if (rc)
+ goto err;
+ return;
+
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle device not operational during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+ ccwreq_stop(cdev, -ENODEV);
+}
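
lpm_adjust() and ccwreq_next_path() above walk a 16-bit mask (starting at 0x8080, i.e. every path twice) down to the next bit that is still present in the available-path mask. The standalone demo below copies lpm_adjust() verbatim from the new file and prints the order in which paths would be tried for a hypothetical lpm of 0xc0; everything else is illustrative scaffolding, not kernel code.

	#include <stdio.h>

	static int lpm_adjust(int lpm, int mask)
	{
		while (lpm && ((lpm & mask) == 0))
			lpm >>= 1;
		return lpm;
	}

	int main(void)
	{
		int lpm = 0xc0;			/* two usable paths */
		int mask = lpm_adjust(0x8080, lpm);

		while (mask) {
			/* cio_start() would be called with the low byte */
			printf("start I/O on path mask 0x%02x\n", mask & 0xff);
			mask = lpm_adjust(mask >> 1, lpm);
		}
		return 0;
	}

This prints 0x80, 0x40, 0x80, 0x40: each available path is attempted twice before the request is failed with -EACCES.
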
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 138124fcfca..126f240715a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -618,6 +618,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
s390_idle_check();
irq_enter();
+ __get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704..bf7f80f5a33 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+enum sch_todo {
+ SCH_TODO_NOTHING,
+ SCH_TODO_UNREG,
+};
+
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
struct device dev; /* entry in device tree */
struct css_driver *driver;
void *private; /* private per subchannel type data */
- struct work_struct work;
+ enum sch_todo todo;
+ struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa8..92ff88ac110 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
return rc;
}
+static void css_sch_todo(struct work_struct *work);
+
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
kfree(sch);
return ERR_PTR(ret);
}
+ INIT_WORK(&sch->todo_work, css_sch_todo);
return sch;
}
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ if (todo == SCH_TODO_UNREG)
+ css_sch_device_unregister(sch);
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(slow_path_wq, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -376,8 +424,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
/* Unusable - ignore. */
return 0;
}
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
- "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+ CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+ schid.sch_no);
return css_probe_device(schid);
}
@@ -394,6 +442,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
+ if (ret != 0 && ret != -EAGAIN) {
+ CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ }
return ret;
}
@@ -684,6 +736,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+ mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
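
css_sched_sch_todo() above only records a new todo if it outranks the one already pending, and css_sch_todo() clears the slot before acting on it, so repeated events collapse into a single unit of work. A small single-threaded sketch of that coalescing; locking and the device/workqueue references are left out, and only the enum names mirror the patch:

	#include <stdio.h>

	enum sch_todo { SCH_TODO_NOTHING, SCH_TODO_UNREG };

	static enum sch_todo pending = SCH_TODO_NOTHING;

	static void sched_todo(enum sch_todo todo)
	{
		if (pending >= todo)	/* equal or higher todo already queued */
			return;
		pending = todo;
		printf("queued todo %d\n", todo);
	}

	static void run_todo(void)
	{
		enum sch_todo todo = pending;

		pending = SCH_TODO_NOTHING;	/* clear before performing it */
		if (todo == SCH_TODO_UNREG)
			printf("unregister subchannel\n");
	}

	int main(void)
	{
		sched_todo(SCH_TODO_UNREG);
		sched_todo(SCH_TODO_UNREG);	/* coalesced with the first */
		run_todo();
		return 0;
	}
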
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151..fe84b92cde6 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "cio.h"
+
/*
* path grouping stuff
*/
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 2490b741e16..9fecfb4223a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -299,53 +303,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
- if (test_and_clear_bit(1, &cdev->private->registered)) {
+ if (device_is_registered(&cdev->dev)) {
+ /* Undo device_add(). */
device_del(&cdev->dev);
+ }
+ if (cdev->private->flags.initialized) {
+ cdev->private->flags.initialized = 0;
/* Release reference from device_initialize(). */
put_device(&cdev->dev);
}
}
-static void ccw_device_remove_orphan_cb(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- ccw_device_unregister(cdev);
- /* Release cdev reference for workqueue processing. */
- put_device(&cdev->dev);
-}
-
-static void
-ccw_device_remove_disconnected(struct ccw_device *cdev)
-{
- unsigned long flags;
-
- /*
- * Forced offline in disconnected state means
- * 'throw away device'.
- */
- if (ccw_device_is_orphan(cdev)) {
- /*
- * Deregister ccw device.
- * Unfortunately, we cannot do this directly from the
- * attribute method.
- */
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
- spin_lock_irqsave(cdev->ccwlock, flags);
- cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_remove_orphan_cb);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
- /* Deregister subchannel, which will kill the ccw device. */
- ccw_device_schedule_sch_unregister(cdev);
-}
+static void io_subchannel_quiesce(struct subchannel *);
/**
* ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +329,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
*/
int ccw_device_set_offline(struct ccw_device *cdev)
{
- int ret;
+ struct subchannel *sch;
+ int ret, state;
if (!cdev)
return -ENODEV;
@@ -374,6 +344,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
+ sch = to_subchannel(cdev->dev.parent);
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +353,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
- ret = ccw_device_offline(cdev);
- if (ret)
- goto error;
+ do {
+ ret = ccw_device_offline(cdev);
+ if (!ret)
+ break;
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+ "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret != -EBUSY)
+ goto error;
+ state = cdev->private->state;
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->state = state;
+ } while (ret == -EBUSY);
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Inform the user if set offline failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: The device entered boxed state while "
+ "being set offline\n", dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: The device stopped operating while "
+ "being set offline\n", dev_name(&cdev->dev));
+ }
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return 0;
error:
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
spin_unlock_irq(cdev->ccwlock);
@@ -448,6 +436,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
spin_unlock_irq(cdev->ccwlock);
+ /* Inform the user that set online failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is boxed\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is not operational\n",
+ dev_name(&cdev->dev));
+ }
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
@@ -494,27 +492,22 @@ error:
static int online_store_handle_offline(struct ccw_device *cdev)
{
- if (cdev->private->state == DEV_STATE_DISCONNECTED)
- ccw_device_remove_disconnected(cdev);
- else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+ if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+ spin_unlock_irq(cdev->ccwlock);
+ } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
return 0;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
{
- int ret;
-
/* Do device recognition, if needed. */
if (cdev->private->state == DEV_STATE_BOXED) {
- ret = ccw_device_recognition(cdev);
- if (ret) {
- CIO_MSG_EVENT(0, "Couldn't start recognition "
- "for device 0.%x.%04x (ret=%d)\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- return ret;
- }
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q,
cdev->private->flags.recog_done);
if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +546,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if ((cdev->private->state != DEV_STATE_OFFLINE &&
- cdev->private->state != DEV_STATE_ONLINE &&
- cdev->private->state != DEV_STATE_BOXED &&
- cdev->private->state != DEV_STATE_DISCONNECTED) ||
- atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED)
+ return -EAGAIN;
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -665,81 +657,31 @@ static int ccw_device_register(struct ccw_device *cdev)
cdev->private->dev_id.devno);
if (ret)
return ret;
- ret = device_add(dev);
- if (ret)
- return ret;
-
- set_bit(1, &cdev->private->registered);
- return ret;
+ return device_add(dev);
}
-struct match_data {
- struct ccw_dev_id dev_id;
- struct ccw_device * sibling;
-};
-
-static int
-match_devno(struct device * dev, void * data)
-{
- struct match_data * d = data;
- struct ccw_device * cdev;
-
- cdev = to_ccwdev(dev);
- if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
- !ccw_device_is_orphan(cdev) &&
- ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
- (cdev != d->sibling))
- return 1;
- return 0;
-}
-
-static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
- struct ccw_device *sibling)
-{
- struct device *dev;
- struct match_data data;
-
- data.dev_id = *dev_id;
- data.sibling = sibling;
- dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
-
- return dev ? to_ccwdev(dev) : NULL;
-}
-
-static int match_orphan(struct device *dev, void *data)
+static int match_dev_id(struct device *dev, void *data)
{
- struct ccw_dev_id *dev_id;
- struct ccw_device *cdev;
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *dev_id = data;
- dev_id = data;
- cdev = to_ccwdev(dev);
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
-static struct ccw_device *
-get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
- struct ccw_dev_id *dev_id)
+static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
struct device *dev;
- dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
- match_orphan);
+ dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
return dev ? to_ccwdev(dev) : NULL;
}
-void ccw_device_do_unbind_bind(struct work_struct *work)
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct subchannel *sch;
int ret;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
-
- if (test_bit(1, &cdev->private->registered)) {
+ if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
@@ -773,6 +715,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
return ERR_PTR(-ENOMEM);
}
+static void ccw_device_todo(struct work_struct *work);
+
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
@@ -780,7 +724,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
atomic_set(&cdev->private->onoff, 0);
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
- INIT_WORK(&cdev->private->kick_work, NULL);
+ INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
@@ -789,6 +733,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
put_device(&cdev->dev);
return -ENODEV;
}
+ cdev->private->flags.initialized = 1;
return 0;
}
@@ -806,76 +751,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
return cdev;
}
-static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
-
-static void sch_attach_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- css_update_ssd_info(sch);
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- cdev->private->schid = sch->schid;
- cdev->ccwlock = sch->lock;
- ccw_device_trigger_reprobe(cdev);
- spin_unlock_irq(sch->lock);
-}
-
-static void sch_attach_disconnected_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- struct subchannel *other_sch;
- int ret;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- other_sch = to_subchannel(cdev->dev.parent);
- /* Note: device_move() changes cdev->dev.parent */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_set_cdev(other_sch, NULL);
- /* No need to keep a subchannel without ccw device around. */
- css_sch_device_unregister(other_sch);
- sch_attach_device(sch, cdev);
- /* Put reference for old parent. */
- put_device(&other_sch->dev);
-}
-
-static void sch_attach_orphaned_device(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- int ret;
- struct subchannel *pseudo_sch;
-
- /* Get reference for new parent. */
- if (!get_device(&sch->dev))
- return;
- pseudo_sch = to_subchannel(cdev->dev.parent);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
- "failed (ret=%d)!\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Put reference for new parent. */
- put_device(&sch->dev);
- return;
- }
- sch_attach_device(sch, cdev);
- /* Put reference on pseudo subchannel. */
- put_device(&pseudo_sch->dev);
-}
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
@@ -888,100 +764,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
css_sch_device_unregister(sch);
return;
}
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, cdev);
- spin_unlock_irq(sch->lock);
/* Start recognition for the new ccw device. */
- if (io_subchannel_recog(cdev, sch)) {
- spin_lock_irq(sch->lock);
- sch_set_cdev(sch, NULL);
- spin_unlock_irq(sch->lock);
- css_sch_device_unregister(sch);
- /* Put reference from io_subchannel_create_ccwdev(). */
- put_device(&sch->dev);
- /* Give up initial reference. */
- put_device(&cdev->dev);
- }
-}
-
-
-void ccw_device_move_to_orphanage(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
- struct ccw_device *replacing_cdev;
- struct subchannel *sch;
- int ret;
- struct channel_subsystem *css;
- struct ccw_dev_id dev_id;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- sch = to_subchannel(cdev->dev.parent);
- css = to_css(sch->dev.parent);
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
-
- /* Increase refcount for pseudo subchannel. */
- get_device(&css->pseudo_subchannel->dev);
- /*
- * Move the orphaned ccw device to the orphanage so the replacing
- * ccw device can take its place on the subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
- ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
- DPM_ORDER_NONE);
- if (ret) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
- "(ret=%d)!\n", cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- /* Decrease refcount for pseudo subchannel again. */
- put_device(&css->pseudo_subchannel->dev);
- return;
- }
- cdev->ccwlock = css->pseudo_subchannel->lock;
- /*
- * Search for the replacing ccw device
- * - among the disconnected devices
- * - in the orphanage
- */
- replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
- if (replacing_cdev) {
- sch_attach_disconnected_device(sch, replacing_cdev);
- /* Release reference from get_disc_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
- if (replacing_cdev) {
- sch_attach_orphaned_device(sch, replacing_cdev);
- /* Release reference from get_orphaned_ccwdev_by_dev_id() */
- put_device(&replacing_cdev->dev);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
- return;
- }
- sch_create_and_recog_new_device(sch);
- /* Release reference of subchannel from old cdev. */
- put_device(&sch->dev);
+ io_subchannel_recog(cdev, sch);
}
/*
* Register recognized device.
*/
-static void
-io_subchannel_register(struct work_struct *work)
+static void io_subchannel_register(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
int ret;
unsigned long flags;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/*
* Check if subchannel is still registered. It may have become
@@ -1033,41 +828,23 @@ out:
cdev->private->flags.recog_done = 1;
wake_up(&cdev->private->wait_q);
out_err:
- /* Release reference for workqueue processing. */
- put_device(&cdev->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
struct subchannel *sch;
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
/* Get subchannel reference for local processing. */
if (!get_device(cdev->dev.parent))
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
- /* Release cdev reference for workqueue processing.*/
- put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
put_device(&sch->dev);
}
-void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
-{
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_call_sch_unregister);
- queue_work(slow_path_wq, &cdev->private->kick_work);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1083,7 +860,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
- ccw_device_schedule_sch_unregister(cdev);
+ /* Remove device found not operational. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
@@ -1092,22 +870,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
* We can't register the device in interrupt context so
* we schedule a work item.
*/
- if (!get_device(&cdev->dev))
- break;
- PREPARE_WORK(&cdev->private->kick_work,
- io_subchannel_register);
- queue_work(slow_path_wq, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
break;
}
}
-static int
-io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
- int rc;
struct ccw_device_private *priv;
- sch_set_cdev(sch, cdev);
cdev->ccwlock = sch->lock;
/* Init private data. */
@@ -1125,62 +896,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
- rc = ccw_device_recognition(cdev);
+ sch_set_cdev(sch, cdev);
+ ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
- if (rc) {
- if (atomic_dec_and_test(&ccw_device_init_count))
- wake_up(&ccw_device_init_wq);
- }
- return rc;
}
-static void ccw_device_move_to_sch(struct work_struct *work)
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+ struct subchannel *sch)
{
- struct ccw_device_private *priv;
- int rc;
- struct subchannel *sch;
- struct ccw_device *cdev;
- struct subchannel *former_parent;
+ struct subchannel *old_sch;
+ int rc, old_enabled = 0;
- priv = container_of(work, struct ccw_device_private, kick_work);
- sch = priv->sch;
- cdev = priv->cdev;
- former_parent = to_subchannel(cdev->dev.parent);
- /* Get reference for new parent. */
+ old_sch = to_subchannel(cdev->dev.parent);
+ /* Obtain child reference for new parent. */
if (!get_device(&sch->dev))
- return;
+ return -ENODEV;
+
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ old_enabled = old_sch->schib.pmcw.ena;
+ rc = 0;
+ if (old_enabled)
+ rc = cio_disable_subchannel(old_sch);
+ spin_unlock_irq(old_sch->lock);
+ if (rc == -EBUSY) {
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ }
+
mutex_lock(&sch->reg_mutex);
- /*
- * Try to move the ccw device to its new subchannel.
- * Note: device_move() changes cdev->dev.parent
- */
rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
- CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
- "0.%x.%04x failed (ret=%d)!\n",
+ CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, rc);
- css_sch_device_unregister(sch);
- /* Put reference for new parent again. */
+ sch->schib.pmcw.dev, rc);
+ if (old_enabled) {
+ /* Try to reenable the old subchannel. */
+ spin_lock_irq(old_sch->lock);
+ cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+ spin_unlock_irq(old_sch->lock);
+ }
+ /* Release child reference for new parent. */
put_device(&sch->dev);
- goto out;
+ return rc;
}
- if (!sch_is_pseudo_sch(former_parent)) {
- spin_lock_irq(former_parent->lock);
- sch_set_cdev(former_parent, NULL);
- spin_unlock_irq(former_parent->lock);
- css_sch_device_unregister(former_parent);
- /* Reset intparm to zeroes. */
- former_parent->config.intparm = 0;
- cio_commit_config(former_parent);
+ /* Clean up old subchannel. */
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ sch_set_cdev(old_sch, NULL);
+ spin_unlock_irq(old_sch->lock);
+ css_schedule_eval(old_sch->schid);
}
- sch_attach_device(sch, cdev);
-out:
- /* Put reference for old parent. */
- put_device(&former_parent->dev);
- put_device(&cdev->dev);
+ /* Release child reference for old parent. */
+ put_device(&old_sch->dev);
+ /* Initialize new subchannel. */
+ spin_lock_irq(sch->lock);
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ if (!sch_is_pseudo_sch(sch))
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ if (!sch_is_pseudo_sch(sch))
+ css_update_ssd_info(sch);
+ return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_subsystem *css = to_css(sch->dev.parent);
+
+ return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +989,6 @@ void io_subchannel_init_config(struct subchannel *sch)
{
memset(&sch->config, 0, sizeof(sch->config));
sch->config.csense = 1;
- /* Use subchannel mp mode when there is more than 1 installed CHPID. */
- if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
- sch->config.mp = 1;
}
static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1009,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
io_subchannel_init_config(sch);
}
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
- struct subchannel *sch;
-
- sch = container_of(work, struct subchannel, work);
- css_sch_device_unregister(sch);
- put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
- get_device(&sch->dev);
- INIT_WORK(&sch->work, io_subchannel_do_unreg);
- queue_work(slow_path_wq, &sch->work);
-}
-
/*
* Note: We always return 0 so that we bind to the device even on error.
* This is needed so that our remove function is called on unregister.
@@ -1247,8 +1017,6 @@ static int io_subchannel_probe(struct subchannel *sch)
{
struct ccw_device *cdev;
int rc;
- unsigned long flags;
- struct ccw_dev_id dev_id;
if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
@@ -1268,6 +1036,7 @@ static int io_subchannel_probe(struct subchannel *sch)
cdev = sch_get_cdev(sch);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
+ cdev->private->flags.initialized = 1;
ccw_device_register(cdev);
/*
* Check if the device is already online. If it is
@@ -1292,44 +1061,14 @@ static int io_subchannel_probe(struct subchannel *sch)
sch->private = kzalloc(sizeof(struct io_subchannel_private),
GFP_KERNEL | GFP_DMA);
if (!sch->private)
- goto out_err;
- /*
- * First check if a fitting device may be found amongst the
- * disconnected devices or in the orphanage.
- */
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
- cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
- if (!cdev)
- cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
- &dev_id);
- if (cdev) {
- /*
- * Schedule moving the device until when we have a registered
- * subchannel to move to and succeed the probe. We can
- * unregister later again, when the probe is through.
- */
- cdev->private->sch = sch;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_sch);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- return 0;
- }
- cdev = io_subchannel_create_ccwdev(sch);
- if (IS_ERR(cdev))
- goto out_err;
- rc = io_subchannel_recog(cdev, sch);
- if (rc) {
- spin_lock_irqsave(sch->lock, flags);
- io_subchannel_recog_done(cdev);
- spin_unlock_irqrestore(sch->lock, flags);
- }
+ goto out_schedule;
+ css_schedule_eval(sch->schid);
return 0;
-out_err:
- kfree(sch->private);
- sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+
out_schedule:
- io_subchannel_schedule_removal(sch);
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ spin_unlock_irq(sch->lock);
return 0;
}
@@ -1337,32 +1076,23 @@ static int
io_subchannel_remove (struct subchannel *sch)
{
struct ccw_device *cdev;
- unsigned long flags;
cdev = sch_get_cdev(sch);
if (!cdev)
- return 0;
+ goto out_free;
+ io_subchannel_quiesce(sch);
/* Set ccw device to not operational and drop reference. */
- spin_lock_irqsave(cdev->ccwlock, flags);
+ spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
+out_free:
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
-static int io_subchannel_notify(struct subchannel *sch, int event)
-{
- struct ccw_device *cdev;
-
- cdev = sch_get_cdev(sch);
- if (!cdev)
- return 0;
- return ccw_device_notify(cdev, event);
-}
-
static void io_subchannel_verify(struct subchannel *sch)
{
struct ccw_device *cdev;
@@ -1372,36 +1102,6 @@ static void io_subchannel_verify(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
- if (cio_update_schib(sch))
- return 0;
- if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
- return 1;
- return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch,
- struct ccw_device *cdev)
-{
- if (cio_clear(sch)) {
- /* Recheck device in case clear failed. */
- sch->lpm = 0;
- if (cdev->online)
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- else
- css_schedule_eval(sch->schid);
- return;
- }
- cdev->private->state = DEV_STATE_CLEAR_VERIFY;
- /* Request retry of internal operation. */
- cdev->private->flags.intretry = 1;
- /* Call handler. */
- if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
-}
-
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
struct ccw_device *cdev;
@@ -1409,18 +1109,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
cdev = sch_get_cdev(sch);
if (!cdev)
return;
- if (check_for_io_on_path(sch, mask)) {
- if (cdev->private->state == DEV_STATE_ONLINE)
- ccw_device_kill_io(cdev);
- else {
- terminate_internal_io(sch, cdev);
- /* Re-start path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
- }
- } else
- /* trigger path verification. */
- dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ if (cio_update_schib(sch))
+ goto err;
+ /* Check for I/O on path. */
+ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+ goto out;
+ if (cdev->private->state == DEV_STATE_ONLINE) {
+ ccw_device_kill_io(cdev);
+ goto out;
+ }
+ if (cio_clear(sch))
+ goto err;
+out:
+ /* Trigger path verification. */
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return;
+err:
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1457,46 +1163,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
return 0;
}
-static void
-io_subchannel_shutdown(struct subchannel *sch)
+static void io_subchannel_quiesce(struct subchannel *sch)
{
struct ccw_device *cdev;
int ret;
+ spin_lock_irq(sch->lock);
cdev = sch_get_cdev(sch);
-
if (cio_is_console(sch->schid))
- return;
+ goto out_unlock;
if (!sch->schib.pmcw.ena)
- /* Nothing to do. */
- return;
+ goto out_unlock;
ret = cio_disable_subchannel(sch);
if (ret != -EBUSY)
- /* Subchannel is disabled, we're done. */
- return;
- cdev->private->state = DEV_STATE_QUIESCE;
+ goto out_unlock;
if (cdev->handler)
- cdev->handler(cdev, cdev->private->intparm,
- ERR_PTR(-EIO));
- ret = ccw_device_cancel_halt_clear(cdev);
- if (ret == -EBUSY) {
- ccw_device_set_timeout(cdev, HZ/10);
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+ while (ret == -EBUSY) {
+ cdev->private->state = DEV_STATE_QUIESCE;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->state != DEV_STATE_QUIESCE);
+ spin_lock_irq(sch->lock);
+ }
+ ret = cio_disable_subchannel(sch);
}
- cio_disable_subchannel(sch);
+out_unlock:
+ spin_unlock_irq(sch->lock);
}
-static int io_subchannel_get_status(struct subchannel *sch)
+static void io_subchannel_shutdown(struct subchannel *sch)
{
- struct schib schib;
-
- if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
- return CIO_GONE;
- if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
- return CIO_REVALIDATE;
- if (!sch->lpm)
- return CIO_NO_PATH;
- return CIO_OPER;
+ io_subchannel_quiesce(sch);
}
static int device_is_disconnected(struct ccw_device *cdev)
@@ -1575,20 +1276,16 @@ static void ccw_device_schedule_recovery(void)
static int purge_fn(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_device_private *priv = cdev->private;
- int unreg;
+ struct ccw_dev_id *id = &cdev->private->dev_id;
spin_lock_irq(cdev->ccwlock);
- unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
- (priv->state == DEV_STATE_OFFLINE);
+ if (is_blacklisted(id->ssid, id->devno) &&
+ (cdev->private->state == DEV_STATE_OFFLINE)) {
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+ id->devno);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ }
spin_unlock_irq(cdev->ccwlock);
- if (!unreg)
- goto out;
- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
- priv->dev_id.devno);
- ccw_device_schedule_sch_unregister(cdev);
-
-out:
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;
@@ -1630,91 +1327,169 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_NOT_OPER;
}
-static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+enum io_sch_action {
+ IO_SCH_UNREG,
+ IO_SCH_ORPH_UNREG,
+ IO_SCH_ATTACH,
+ IO_SCH_UNREG_ATTACH,
+ IO_SCH_ORPH_ATTACH,
+ IO_SCH_REPROBE,
+ IO_SCH_VERIFY,
+ IO_SCH_DISC,
+ IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cio_update_schib(sch)) {
+ /* Not operational. */
+ if (!cdev)
+ return IO_SCH_UNREG;
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ return IO_SCH_UNREG;
+ return IO_SCH_ORPH_UNREG;
+ }
+ /* Operational. */
+ if (!cdev)
+ return IO_SCH_ATTACH;
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_ORPH_ATTACH;
+ }
+ if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ if (!ccw_device_notify(cdev, CIO_NO_PATH))
+ return IO_SCH_UNREG;
+ return IO_SCH_DISC;
+ }
+ if (device_is_disconnected(cdev))
+ return IO_SCH_REPROBE;
+ if (cdev->online)
+ return IO_SCH_VERIFY;
+ return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
- int event, ret, disc;
unsigned long flags;
- enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
+ enum io_sch_action action;
+ int rc = -EAGAIN;
spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
cdev = sch_get_cdev(sch);
- disc = device_is_disconnected(cdev);
- if (disc && slow) {
- /* Disconnected devices are evaluated directly only.*/
- spin_unlock_irqrestore(sch->lock, flags);
- return 0;
- }
- /* No interrupt after machine check - kill pending timers. */
- if (cdev)
- ccw_device_set_timeout(cdev, 0);
- if (!disc && !slow) {
- /* Non-disconnected devices are evaluated on the slow path. */
- spin_unlock_irqrestore(sch->lock, flags);
- return -EAGAIN;
+ if (cdev && work_pending(&cdev->private->todo_work))
+ goto out_unlock;
+ action = sch_get_action(sch);
+ CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, process,
+ action);
+ /* Perform immediate actions while holding the lock. */
+ switch (action) {
+ case IO_SCH_REPROBE:
+ /* Trigger device recognition. */
+ ccw_device_trigger_reprobe(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_VERIFY:
+ /* Trigger path verification. */
+ io_subchannel_verify(sch);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_DISC:
+ ccw_device_set_disconnected(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ ccw_device_set_disconnected(cdev);
+ break;
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_UNREG:
+ if (cdev)
+ ccw_device_set_notoper(cdev);
+ break;
+ case IO_SCH_NOP:
+ rc = 0;
+ goto out_unlock;
+ default:
+ break;
}
- event = io_subchannel_get_status(sch);
- CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
- sch->schid.ssid, sch->schid.sch_no, event,
- disc ? "disconnected" : "normal",
- slow ? "slow" : "fast");
- /* Analyze subchannel status. */
- action = NONE;
- switch (event) {
- case CIO_NO_PATH:
- if (disc) {
- /* Check if paths have become available. */
- action = REPROBE;
- break;
- }
- /* fall through */
- case CIO_GONE:
- /* Ask driver what to do with device. */
- if (io_subchannel_notify(sch, event))
- action = DISC;
- else
- action = UNREGISTER;
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* All other actions require process context. */
+ if (!process)
+ goto out;
+ /* Handle attached ccw device. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ /* Move ccw device to orphanage. */
+ rc = ccw_device_move_to_orph(cdev);
+ if (rc)
+ goto out;
break;
- case CIO_REVALIDATE:
- /* Device will be removed, so no notify necessary. */
- if (disc)
- /* Reprobe because immediate unregister might block. */
- action = REPROBE;
- else
- action = UNREGISTER_PROBE;
+ case IO_SCH_UNREG_ATTACH:
+ /* Unregister ccw device. */
+ ccw_device_unregister(cdev);
break;
- case CIO_OPER:
- if (disc)
- /* Get device operational again. */
- action = REPROBE;
+ default:
break;
}
- /* Perform action. */
- ret = 0;
+ /* Handle subchannel. */
switch (action) {
- case UNREGISTER:
- case UNREGISTER_PROBE:
- ccw_device_set_notoper(cdev);
- /* Unregister device (will use subchannel lock). */
- spin_unlock_irqrestore(sch->lock, flags);
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_UNREG:
css_sch_device_unregister(sch);
- spin_lock_irqsave(sch->lock, flags);
break;
- case REPROBE:
+ case IO_SCH_ORPH_ATTACH:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_ATTACH:
+ dev_id.ssid = sch->schid.ssid;
+ dev_id.devno = sch->schib.pmcw.dev;
+ cdev = get_ccwdev_by_dev_id(&dev_id);
+ if (!cdev) {
+ sch_create_and_recog_new_device(sch);
+ break;
+ }
+ rc = ccw_device_move_to_sch(cdev, sch);
+ if (rc) {
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ goto out;
+ }
+ spin_lock_irqsave(sch->lock, flags);
ccw_device_trigger_reprobe(cdev);
- break;
- case DISC:
- ccw_device_set_disconnected(cdev);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
break;
default:
break;
}
- spin_unlock_irqrestore(sch->lock, flags);
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
+ return 0;
- return ret;
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+out:
+ return rc;
}
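Illustrative sketch (editor's addition, not part of the patch above): one way a hypothetical caller could honor the -EAGAIN contract documented for io_subchannel_sch_event(), deferring the subchannel to the slow path so the event is replayed with process=1. That css_schedule_eval() re-drives the event this way is an assumption here, not something stated in the diff.

/* Hypothetical caller: handle the event in place when possible; on
 * -EAGAIN defer the subchannel for evaluation in process context. */
static void example_handle_sch_event(struct subchannel *sch)
{
	if (io_subchannel_sch_event(sch, 0) == -EAGAIN)
		css_schedule_eval(sch->schid);
}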
#ifdef CONFIG_CCW_CONSOLE
@@ -1744,10 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
- rc = io_subchannel_recog(cdev, sch);
- if (rc)
- return rc;
-
+ io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
@@ -1763,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
rc = 0;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
- return 0;
+ return rc;
}
struct ccw_device *
@@ -1919,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
- if (work_pending(&cdev->private->kick_work))
+ if (work_pending(&cdev->private->todo_work))
return -EAGAIN;
/* Fail while device is being set online/offline. */
if (atomic_read(&cdev->private->onoff))
@@ -2005,7 +1777,6 @@ static int ccw_device_pm_thaw(struct device *dev)
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
- int ret;
if (cio_is_console(sch->schid))
goto out;
@@ -2015,22 +1786,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
spin_lock_irq(sch->lock);
cdev->private->flags.resuming = 1;
- ret = ccw_device_recognition(cdev);
+ ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
- if (ret) {
- CIO_MSG_EVENT(0, "Couldn't start recognition for device "
- "0.%x.%04x (ret=%d)\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno, ret);
- spin_lock_irq(sch->lock);
- cdev->private->state = DEV_STATE_DISCONNECTED;
- spin_unlock_irq(sch->lock);
- /* notify driver after the resume cb */
- goto out;
- }
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED);
-
out:
cdev->private->flags.resuming = 0;
}
@@ -2040,7 +1799,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_BOXED;
if (ccw_device_notify(cdev, CIO_BOXED))
return 0;
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
}
@@ -2049,7 +1808,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
cdev->private->state = DEV_STATE_DISCONNECTED;
if (ccw_device_notify(cdev, CIO_GONE))
return 0;
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV;
}
@@ -2094,9 +1853,7 @@ static int ccw_device_pm_restore(struct device *dev)
/* check if the device type has changed */
if (!ccw_device_test_sense_data(cdev)) {
ccw_device_update_sense_data(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
ret = -ENODEV;
goto out_unlock;
}
@@ -2140,7 +1897,7 @@ out_disc_unlock:
goto out_restore;
out_unreg_unlock:
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
ret = -ENODEV;
out_unlock:
spin_unlock_irq(sch->lock);
@@ -2205,6 +1962,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
return sch->schid;
}
+static void ccw_device_todo(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ enum cdev_todo todo;
+
+ priv = container_of(work, struct ccw_device_private, todo_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ /* Find out todo. */
+ spin_lock_irq(cdev->ccwlock);
+ todo = priv->todo;
+ priv->todo = CDEV_TODO_NOTHING;
+ CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+ priv->dev_id.ssid, priv->dev_id.devno, todo);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Perform todo. */
+ switch (todo) {
+ case CDEV_TODO_ENABLE_CMF:
+ cmf_reenable(cdev);
+ break;
+ case CDEV_TODO_REBIND:
+ ccw_device_do_unbind_bind(cdev);
+ break;
+ case CDEV_TODO_REGISTER:
+ io_subchannel_register(cdev);
+ break;
+ case CDEV_TODO_UNREG_EVAL:
+ if (!sch_is_pseudo_sch(sch))
+ css_schedule_eval(sch->schid);
+ /* fall-through */
+ case CDEV_TODO_UNREG:
+ if (sch_is_pseudo_sch(sch))
+ ccw_device_unregister(cdev);
+ else
+ ccw_device_call_sch_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+ CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ todo);
+ if (cdev->private->todo >= todo)
+ return;
+ cdev->private->todo = todo;
+ /* Get workqueue ref. */
+ if (!get_device(&cdev->dev))
+ return;
+ if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&cdev->dev);
+ }
+}
+
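Illustrative sketch (editor's addition, not part of the patch): a hypothetical caller of ccw_device_sched_todo() that relies on the priority check documented above, holding the ccwdev lock as the kerneldoc requires. The function name is made up for illustration.

/* Hypothetical caller: request an unregister for a device found gone.
 * If a todo of equal or higher priority is already pending, the call
 * is a no-op thanks to the cdev->private->todo comparison above. */
static void example_mark_device_gone(struct ccw_device *cdev)
{
	spin_lock_irq(cdev->ccwlock);
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irq(cdev->ccwlock);
}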
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842..bcfe13e4263 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */
- DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
DEV_STATE_CMFUPDATE,
+ DEV_STATE_STEAL_LOCK,
/* last element! */
NR_DEV_STATES
};
@@ -81,17 +81,16 @@ void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
-void ccw_device_do_unbind_bind(struct work_struct *);
-void ccw_device_move_to_orphanage(struct work_struct *);
int ccw_device_is_orphan(struct ccw_device *);
-int ccw_device_recognition(struct ccw_device *);
+void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +98,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
-void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_id_done(struct ccw_device *, int);
/* Function prototypes for path grouping stuff. */
-void ccw_device_sense_pgid_start(struct ccw_device *);
-void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
-void ccw_device_sense_pgid_done(struct ccw_device *, int);
-
void ccw_device_verify_start(struct ccw_device *);
-void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
void ccw_device_verify_done(struct ccw_device *, int);
void ccw_device_disband_start(struct ccw_device *);
-void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
void ccw_device_disband_done(struct ccw_device *, int);
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9e..ae760658a13 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
sch = to_subchannel(cdev->dev.parent);
- ccw_device_set_timeout(cdev, 0);
- cio_disable_subchannel(sch);
+ if (cio_disable_subchannel(sch))
+ state = DEV_STATE_NOT_OPER;
/*
* Now that we tried recognition, we have performed device selection
* through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
}
switch (state) {
case DEV_STATE_NOT_OPER:
- CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
- "subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
if (!cdev->online) {
ccw_device_update_sense_data(cdev);
- /* Issue device info message. */
- CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
- "CU Type/Mod = %04X/%02X, Dev Type/Mod "
- "= %04X/%02X\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- cdev->id.cu_type, cdev->id.cu_model,
- cdev->id.dev_type, cdev->id.dev_model);
break;
}
cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
} else {
ccw_device_update_sense_data(cdev);
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
return;
case DEV_STATE_BOXED:
- CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
- " subchannel 0.%x.%04x\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
if (cdev->id.cu_type != 0) { /* device was recognized before */
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_BOXED;
@@ -343,28 +325,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}
-static void cmf_reenable_delayed(struct work_struct *work)
-{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
- cmf_reenable(cdev);
-}
-
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
if (ccw_device_notify(cdev, CIO_OPER)) {
/* Reenable channel measurements, if needed. */
- PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
return;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
/*
@@ -392,14 +362,14 @@ ccw_device_done(struct ccw_device *cdev, int state)
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_GONE))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
@@ -409,7 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_NO_PATH))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
@@ -425,107 +395,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
}
-static int cmp_pgid(struct pgid *p1, struct pgid *p2)
-{
- char *c1;
- char *c2;
-
- c1 = (char *)p1;
- c2 = (char *)p2;
-
- return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
-}
-
-static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
-{
- int i;
- int last;
-
- last = 0;
- for (i = 0; i < 8; i++) {
- if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
- /* No PGID yet */
- continue;
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET) {
- /* First non-zero PGID */
- last = i;
- continue;
- }
- if (cmp_pgid(&cdev->private->pgid[i],
- &cdev->private->pgid[last]) == 0)
- /* Non-conflicting PGIDs */
- continue;
-
- /* PGID mismatch, can't pathgroup. */
- CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
- "0.%x.%04x, can't pathgroup\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- cdev->private->options.pgroup = 0;
- return;
- }
- if (cdev->private->pgid[last].inf.ps.state1 ==
- SNID_STATE1_RESET)
- /* No previous pgid found */
- memcpy(&cdev->private->pgid[0],
- &channel_subsystems[0]->global_pgid,
- sizeof(struct pgid));
- else
- /* Use existing pgid */
- memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
- sizeof(struct pgid));
-}
-
-/*
- * Function called from device_pgid.c after sense path ground has completed.
- */
-void
-ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
- cdev->private->options.pgroup = 0;
- break;
- case 0: /* success */
- case -EACCES: /* partial success, some paths not operational */
- /* Check if all pgids are equal or 0. */
- __ccw_device_get_common_pgid(cdev);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_done(cdev, DEV_STATE_BOXED);
- return;
- default:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
- }
- /* Start Path Group verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
-}
-
/*
* Start device recognition.
*/
-int
-ccw_device_recognition(struct ccw_device *cdev)
+void ccw_device_recognition(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret != 0)
- /* Couldn't enable the subchannel for i/o. Sick device. */
- return ret;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
* We used to start here with a sense pgid to find out whether a device
@@ -537,32 +412,33 @@ ccw_device_recognition(struct ccw_device *cdev)
*/
cdev->private->flags.recog_done = 0;
cdev->private->state = DEV_STATE_SENSE_ID;
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
ccw_device_sense_id_start(cdev);
- return 0;
}
/*
- * Handle timeout in device recognition.
+ * Handle events for states that use the ccw request infrastructure.
*/
-static void
-ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ switch (e) {
+ case DEV_EVENT_NOTOPER:
+ ccw_request_notoper(cdev);
break;
- case -ENODEV:
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ case DEV_EVENT_INTERRUPT:
+ ccw_request_handler(cdev);
+ break;
+ case DEV_EVENT_TIMEOUT:
+ ccw_request_timeout(cdev);
break;
default:
- ccw_device_set_timeout(cdev, 3*HZ);
+ break;
}
}
-
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
@@ -571,21 +447,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
if (cio_update_schib(sch)) {
- cdev->private->flags.donotify = 0;
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- return;
+ err = -ENODEV;
+ goto callback;
}
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
if (cdev->private->flags.doverify) {
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
return;
}
+callback:
switch (err) {
- case -EOPNOTSUPP: /* path grouping not supported, just set online. */
- cdev->private->options.pgroup = 0;
case 0:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
@@ -604,18 +477,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
}
break;
case -ETIME:
+ case -EUSERS:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
+ case -EACCES:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+ break;
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- if (cdev->online) {
- ccw_device_set_timeout(cdev, 0);
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- } else
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
@@ -640,17 +515,9 @@ ccw_device_online(struct ccw_device *cdev)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
- /* Do we want to do path grouping? */
- if (!cdev->private->options.pgroup) {
- /* Start initial path verification. */
- cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
- ccw_device_verify_start(cdev);
- return 0;
- }
- /* Do a SensePGID first. */
- cdev->private->state = DEV_STATE_SENSE_PGID;
- ccw_device_sense_pgid_start(cdev);
+ /* Start initial path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
return 0;
}
@@ -666,7 +533,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
break;
default:
cdev->private->flags.donotify = 0;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
@@ -703,7 +569,7 @@ ccw_device_offline(struct ccw_device *cdev)
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
- if (!cdev->private->options.pgroup) {
+ if (!cdev->private->flags.pgroup) {
/* No, set state offline immediately. */
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
@@ -715,43 +581,13 @@ ccw_device_offline(struct ccw_device *cdev)
}
/*
- * Handle timeout in device online/offline process.
- */
-static void
-ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
-{
- int ret;
-
- ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- ccw_device_done(cdev, DEV_STATE_BOXED);
- break;
- case -ENODEV:
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
- break;
- default:
- ccw_device_set_timeout(cdev, 3*HZ);
- }
-}
-
-/*
- * Handle not oper event in device recognition.
- */
-static void
-ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
- ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
-}
-
-/*
* Handle not operational event in non-special state.
*/
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
if (!ccw_device_notify(cdev, CIO_GONE))
- ccw_device_schedule_sch_unregister(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
}
@@ -802,11 +638,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
- cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
}
/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (cdev->online) {
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ else
+ ccw_device_online_verify(cdev, dev_event);
+ } else
+ css_schedule_eval(sch->schid);
+}
+
+/*
* Got an interrupt for a normal io (state online).
*/
static void
@@ -904,12 +756,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
*/
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
- /* Retry Basic Sense if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- ccw_device_do_sense(cdev, irb);
- return;
- }
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +779,6 @@ call_handler:
}
static void
-ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- irb = (struct irb *) __LC_IRB;
- /* Accumulate status. We don't do basic sense. */
- ccw_device_accumulate_irb(cdev, irb);
- /* Remember to clear irb to avoid residuals. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try to start delayed device verification. */
- ccw_device_online_verify(cdev, 0);
- /* Note: Don't call handler for cio initiated clear! */
-}
-
-static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
@@ -1004,32 +835,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
- struct irb *irb;
-
- switch (dev_event) {
- case DEV_EVENT_INTERRUPT:
- irb = (struct irb *) __LC_IRB;
- /* Check for unsolicited interrupt. */
- if ((scsw_stctl(&irb->scsw) ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
- (!scsw_cc(&irb->scsw)))
- /* FIXME: we should restart stlck here, but this
- * is extremely unlikely ... */
- goto out_wakeup;
-
- ccw_device_accumulate_irb(cdev, irb);
- /* We don't care about basic sense etc. */
- break;
- default: /* timeout */
- break;
- }
-out_wakeup:
- wake_up(&cdev->private->wait_q);
-}
-
-static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
@@ -1038,10 +843,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return;
-
- /* After 60s the device recognition is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
-
cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
ccw_device_sense_id_start(cdev);
}
@@ -1072,22 +873,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
- if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_move_to_orphanage);
- queue_work(slow_path_wq, &cdev->private->kick_work);
- } else
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+ css_schedule_eval(sch->schid);
+ else
ccw_device_start_id(cdev, 0);
}
-static void
-ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+ enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
- * An interrupt in state offline means a previous disable was not
+ * An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
@@ -1113,10 +912,7 @@ static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
- if (dev_event == DEV_EVENT_NOTOPER)
- cdev->private->state = DEV_STATE_NOT_OPER;
- else
- cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
@@ -1126,17 +922,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
- switch (ret) {
- case 0:
- cdev->private->state = DEV_STATE_OFFLINE;
- wake_up(&cdev->private->wait_q);
- break;
- case -ENODEV:
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ } else {
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
- break;
- default:
- ccw_device_set_timeout(cdev, HZ/10);
}
}
@@ -1150,50 +940,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
}
/*
- * Bug operation action.
- */
-static void
-ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
-{
- CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
- "0.%x.%04x\n", cdev->private->state, dev_event,
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- BUG();
-}
-
-/*
* device statemachine
*/
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
- [DEV_EVENT_INTERRUPT] = ccw_device_bug,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
@@ -1209,24 +986,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
- [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
- },
- /* states to wait for i/o completion before doing something */
- [DEV_STATE_CLEAR_VERIFY] = {
- [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
+ [DEV_EVENT_INTERRUPT] = ccw_device_nop,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
},
+ /* states to wait for i/o completion before doing something */
[DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1243,13 +1014,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
- [DEV_EVENT_TIMEOUT] = ccw_device_bug,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
- [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
- [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
- [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1035,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
},
+ [DEV_STATE_STEAL_LOCK] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
};
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
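Illustrative sketch (editor's addition, not part of the patch): the dispatch implied by the dev_jumptable above — index by the device's current state and the event, then call the handler. The real dev_fsm_event() lives in the cio headers; the body shown here is an assumption about its shape, not a quote of the source.

/* Assumed shape of the state machine dispatch used with dev_jumptable[][]. */
static inline void example_dev_fsm_event(struct ccw_device *cdev,
					 enum dev_event dev_event)
{
	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}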
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34..78a0b43862c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
/*
- * drivers/s390/cio/device_id.c
+ * CCW device SENSE ID I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Sense ID functions.
+ * Copyright IBM Corp. 2002,2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
#include <asm/ccwdev.h>
-#include <asm/delay.h>
+#include <asm/setup.h>
#include <asm/cio.h>
-#include <asm/lowcore.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define SENSE_ID_RETRIES 256
+#define SENSE_ID_TIMEOUT (10 * HZ)
+#define SENSE_ID_MIN_LEN 4
+#define SENSE_ID_BASIC_LEN 7
+
/**
- * vm_vdev_to_cu_type - Convert vm virtual device into control unit type
- * for certain devices.
- * @class: virtual device class
- * @type: virtual device type
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
*
- * Returns control unit type if a match was made or %0xffff otherwise.
+ * Return 0 on success, non-zero otherwise.
*/
-static int vm_vdev_to_cu_type(int class, int type)
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
static struct {
int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
};
int i;
- for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
- if (class == vm_devices[i].class && type == vm_devices[i].type)
- return vm_devices[i].cu_type;
+ /* Special case for osa devices. */
+ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+ senseid->cu_type = 0x3088;
+ senseid->cu_model = 0x60;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+ if (diag->vrdcvcla == vm_devices[i].class &&
+ diag->vrdcvtyp == vm_devices[i].type) {
+ senseid->cu_type = vm_devices[i].cu_type;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ }
- return 0xffff;
+ return -ENODEV;
}
/**
- * diag_get_dev_info - retrieve device information via DIAG X'210'
- * @devno: device number
- * @ps: pointer to sense ID data area
+ * diag210_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
*/
-static int diag_get_dev_info(u16 devno, struct senseid *ps)
+static int diag210_get_dev_info(struct ccw_device *cdev)
{
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
struct diag210 diag_data;
- int ccode;
-
- CIO_TRACE_EVENT (4, "VMvdinf");
-
- diag_data = (struct diag210) {
- .vrdcdvno = devno,
- .vrdclen = sizeof (diag_data),
- };
-
- ccode = diag210 (&diag_data);
- if ((ccode == 0) || (ccode == 2)) {
- ps->reserved = 0xff;
-
- /* Special case for osa devices. */
- if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
- ps->cu_type = 0x3088;
- ps->cu_model = 0x60;
- return 0;
- }
- ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
- diag_data.vrdcvtyp);
- if (ps->cu_type != 0xffff)
- return 0;
- }
-
- CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
- "vdev class : %02X, vdev type : %04X \n ... "
- "rdev class : %02X, rdev type : %04X, "
- "rdev model: %02X\n",
- devno, ccode,
- diag_data.vrdcvcla, diag_data.vrdcvtyp,
- diag_data.vrdcrccl, diag_data.vrdccrty,
- diag_data.vrdccrmd);
-
+ int rc;
+
+ if (dev_id->ssid != 0)
+ return -ENODEV;
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id->devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ CIO_TRACE_EVENT(4, "diag210");
+ CIO_HEX_EVENT(4, &rc, sizeof(rc));
+ CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+ if (rc != 0 && rc != 2)
+ goto err_failed;
+ if (diag210_to_senseid(senseid, &diag_data))
+ goto err_unknown;
+ return 0;
+
+err_unknown:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+ dev_id->ssid, dev_id->devno);
+ return -ENODEV;
+err_failed:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+ dev_id->ssid, dev_id->devno, rc);
return -ENODEV;
}
/*
- * Start Sense ID helper function.
- * Try to obtain the 'control unit'/'device type' information
- * associated with the subchannel.
+ * Initialize SENSE ID data.
*/
-static int
-__ccw_device_sense_id_start(struct ccw_device *cdev)
-{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Setup sense channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_ID;
- ccw->cda = (__u32) __pa (&cdev->private->senseid);
- ccw->count = sizeof (struct senseid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- cdev->private->senseid.cu_type = 0xFFFF;
- if ((sch->opm & cdev->private->imask) != 0 &&
- cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- }
- return ret;
-}
-
-void
-ccw_device_sense_id_start(struct ccw_device *cdev)
+static void snsid_init(struct ccw_device *cdev)
{
- int ret;
-
- memset (&cdev->private->senseid, 0, sizeof (struct senseid));
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ cdev->private->flags.esid = 0;
+ memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+ cdev->private->senseid.cu_type = 0xffff;
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense ID was received.
+ * Check for complete SENSE ID data.
*/
-static int
-ccw_device_check_sense_id(struct ccw_device *cdev)
+static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
-
- /* Check the error cases. */
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense ID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
- /*
- * if the device doesn't support the SenseID
- * command further retries wouldn't help ...
- * NB: We don't check here for intervention required like we
- * did before, because tape devices with no tape inserted
- * may present this status *in conjunction with* the
- * sense id information. So, for intervention required,
- * we use the "whack it until it talks" strategy...
- */
- CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
- "0.%x.%04x reports cmd reject\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
+ struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+ if (cdev->private->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+ if (cdev->private->senseid.reserved != 0xff)
return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
- "lpum %02X, cnt %02d, sns :"
- " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+ cdev->private->flags.esid = 1;
+ return 0;
- lpm = to_io_private(sch)->orb.cmd.lpm;
- if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
- "on subchannel 0.%x.%04x is "
- "'not operational'\n", lpm,
- cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
- return -EACCES;
- }
-
- /* Did we get a proper answer ? */
- if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
- cdev->private->senseid.reserved == 0xFF) {
- if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
- cdev->private->flags.esid = 1;
- return 0; /* Success */
- }
-
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
- "subchannel 0.%x.%04x returns status %02X%02X\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no,
- irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+out_restart:
+ snsid_init(cdev);
return -EAGAIN;
}
/*
- * Got interrupt for Sense ID.
+ * Process SENSE ID request result.
*/
-void
-ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = (struct irb *) __LC_IRB;
- /* Retry sense id, if needed. */
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
- ret = __ccw_device_sense_id_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_id_done(cdev, ret);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+ /* Try diag 0x210 fallback on z/VM. */
+ snsid_init(cdev);
+ if (diag210_get_dev_info(cdev) == 0) {
+ rc = 0;
+ vm = 1;
}
- return;
}
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- ret = ccw_device_check_sense_id(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
- case 0: /* Sense id succeeded. */
- case -ETIME: /* Sense id stopped by timeout. */
- ccw_device_sense_id_done(cdev, ret);
- break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* try again. */
- ret = __ccw_device_sense_id_start(cdev);
- if (ret == 0 || ret == -EBUSY)
- break;
- /* fall through. */
- default: /* Sense ID failed. Try asking VM. */
- if (MACHINE_IS_VM)
- ret = diag_get_dev_info(cdev->private->dev_id.devno,
- &cdev->private->senseid);
- else
- /*
- * If we can't couldn't identify the device type we
- * consider the device "not operational".
- */
- ret = -ENODEV;
+ CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+ "%04x/%02x%s\n", id->ssid, id->devno, rc,
+ senseid->cu_type, senseid->cu_model, senseid->dev_type,
+ senseid->dev_model, vm ? " (diag210)" : "");
+ ccw_device_sense_id_done(cdev, rc);
+}
- ccw_device_sense_id_done(cdev, ret);
- break;
- }
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Data setup. */
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+ cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->cp = cp;
+ req->timeout = SENSE_ID_TIMEOUT;
+ req->maxretries = SENSE_ID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->check = snsid_check;
+ req->callback = snsid_callback;
+ ccw_request_start(cdev);
}
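For orientation, the rewritten SENSE ID code above follows one fixed pattern: initialize the data area, describe a single channel program, fill in a request, start it, and let the callback report the result. The stand-alone sketch below illustrates only that pattern; the types, command code, timeout and retry values are simplified stand-ins, not the kernel's definitions, and the direct callback invocation merely stands in for ccw_request_start().

/* Illustrative sketch only -- simplified stand-ins for the kernel types. */
#include <stdio.h>
#include <string.h>

struct toy_ccw { unsigned char cmd; void *cda; unsigned short count; };

struct toy_request {
	struct toy_ccw *cp;		/* channel program to start */
	unsigned long timeout;		/* per-I/O timeout (placeholder) */
	unsigned short maxretries;	/* retries per path (placeholder) */
	unsigned char lpm;		/* mask of paths to try */
	int (*check)(void *data);	/* are the results final? */
	void (*callback)(void *data, int rc);
};

static int toy_check(void *data) { (void)data; return 0; }
static void toy_callback(void *data, int rc) { (void)data; printf("done, rc=%d\n", rc); }

static void toy_sense_start(struct toy_request *req, struct toy_ccw *cp,
			    void *buf, unsigned short len)
{
	memset(buf, 0, len);			/* data setup */
	cp->cmd = 0xe4;				/* SENSE ID (illustrative value) */
	cp->cda = buf;
	cp->count = len;
	memset(req, 0, sizeof(*req));		/* request setup */
	req->cp = cp;
	req->timeout = 10;
	req->maxretries = 256;
	req->lpm = 0x80;			/* start with the first path */
	req->check = toy_check;
	req->callback = toy_callback;
	req->callback(buf, req->check(buf));	/* stands in for ccw_request_start() */
}

int main(void)
{
	struct toy_request req;
	struct toy_ccw cp;
	unsigned char buf[64];

	toy_sense_start(&req, &cp, buf, sizeof(buf));
	return 0;
}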
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a29..6da84543dfe 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+ cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
/**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+ return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+ return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter; value is only used if no I/O is
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY ||
- cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
return sch->lpm;
}
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
- void *buf, *buf2;
- unsigned long flags;
- struct subchannel *sch;
- int ret;
+struct stlck_data {
+ struct completion done;
+ int rc;
+};
- if (!cdev)
- return -ENODEV;
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+ struct stlck_data *sdata = data;
- if (cdev->drv && !cdev->private->options.force)
- return -EINVAL;
+ sdata->rc = rc;
+ complete(&sdata->done);
+}
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT(2, "stl lock");
- CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct stlck_data data;
+ u8 *buffer;
+ int rc;
- buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
- if (!buf2) {
- kfree(buf);
- return -ENOMEM;
+ /* Check if steal lock operation is valid for this device. */
+ if (cdev->drv) {
+ if (!cdev->private->options.force)
+ return -EINVAL;
}
- spin_lock_irqsave(sch->lock, flags);
- ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
- if (ret)
- goto out_unlock;
- /*
- * Setup ccw. We chain an unconditional reserve and a release so we
- * only break the lock.
- */
- cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
- cdev->private->iccws[0].cda = (__u32) __pa(buf);
- cdev->private->iccws[0].count = 32;
- cdev->private->iccws[0].flags = CCW_FLAG_CC;
- cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
- cdev->private->iccws[1].cda = (__u32) __pa(buf2);
- cdev->private->iccws[1].count = 32;
- cdev->private->iccws[1].flags = 0;
- ret = cio_start(sch, cdev->private->iccws, 0);
- if (ret) {
- cio_disable_subchannel(sch); //FIXME: return code?
+ buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ init_completion(&data.done);
+ data.rc = -EIO;
+ spin_lock_irq(sch->lock);
+ rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+ if (rc)
goto out_unlock;
+ /* Perform operation. */
+ cdev->private->state = DEV_STATE_STEAL_LOCK;
+ ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+ spin_unlock_irq(sch->lock);
+ /* Wait for operation to finish. */
+ if (wait_for_completion_interruptible(&data.done)) {
+ /* Got a signal. */
+ spin_lock_irq(sch->lock);
+ ccw_request_cancel(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_for_completion(&data.done);
}
- cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- spin_unlock_irqrestore(sch->lock, flags);
- wait_event(cdev->private->wait_q,
- cdev->private->irb.scsw.cmd.actl == 0);
- spin_lock_irqsave(sch->lock, flags);
- cio_disable_subchannel(sch); //FIXME: return code?
- if ((cdev->private->irb.scsw.cmd.dstat !=
- (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
- (cdev->private->irb.scsw.cmd.cstat != 0))
- ret = -EIO;
- /* Clear irb. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = data.rc;
+ /* Check results. */
+ spin_lock_irq(sch->lock);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_BOXED;
out_unlock:
- kfree(buf);
- kfree(buf2);
- spin_unlock_irqrestore(sch->lock, flags);
- return ret;
+ spin_unlock_irq(sch->lock);
+ kfree(buffer);
+
+ return rc;
}
void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
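The hunk above adds a CCWDEV_DO_MULTIPATH option bit plus two exported query helpers. A hypothetical driver fragment (not part of this patch, names invented) showing how a caller would combine them looks roughly as follows; it assumes the usual <asm/ccwdev.h> declarations and reduces error handling to the minimum.

/* Hypothetical caller fragment illustrating the option and query helpers. */
#include <linux/errno.h>
#include <asm/ccwdev.h>

static int example_enable_multipath(struct ccw_device *cdev)
{
	int rc;

	/* Request path grouping and multipathing before the device goes online. */
	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
					  CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	/* After path verification, the flags reflect what was actually set up. */
	if (!ccw_device_is_pathgroup(cdev))
		return -EOPNOTSUPP;	/* grouping could not be established */
	return ccw_device_is_multipath(cdev) ? 0 : 1;	/* 1: grouped, single path */
}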
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b..aad188e43b4 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,561 @@
/*
- * drivers/s390/cio/device_pgid.c
+ * CCW device PGID and path verification I/O handling.
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Path Group ID functions.
+ * Copyright IBM Corp. 2002,2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
-#include <linux/module.h>
-#include <linux/init.h>
-
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/delay.h>
-#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
-#include "css.h"
#include "device.h"
-#include "ioasm.h"
#include "io_sch.h"
+#define PGID_RETRIES 256
+#define PGID_TIMEOUT (10 * HZ)
+
/*
- * Helper function called from interrupt context to decide whether an
- * operation should be tried again.
+ * Process path verification data and report result.
*/
-static int __ccw_device_should_retry(union scsw *scsw)
+static void verify_done(struct ccw_device *cdev, int rc)
{
- /* CC is only valid if start function bit is set. */
- if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
- return 1;
- /* No more activity. For sense and set PGID we stubbornly try again. */
- if (!scsw->cmd.actl)
- return 1;
- return 0;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ int mpath = cdev->private->flags.mpath;
+ int pgroup = cdev->private->flags.pgroup;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ if (sch->config.mp != mpath) {
+ sch->config.mp = mpath;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+ "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+ sch->vpm);
+ ccw_device_verify_done(cdev, rc);
}
/*
- * Start Sense Path Group ID helper function. Used in ccw_device_recog
- * and ccw_device_sense_pgid.
+ * Create channel program to perform a NOOP.
*/
-static int
-__ccw_device_sense_pgid_start(struct ccw_device *cdev)
+static void nop_build_cp(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Return if we already checked on all paths. */
- if (cdev->private->imask == 0)
- return (sch->lpm == 0) ? -ENODEV : -EACCES;
- i = 8 - ffs(cdev->private->imask);
-
- /* Setup sense path group id channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_SENSE_PGID;
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- /* Try on every path. */
- ret = -ENODEV;
- while (cdev->private->imask != 0) {
- /* Try every path multiple times. */
- ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if (ret != -EACCES)
- return ret;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not "
- "operational'\n",
- cdev->private->dev_id.devno,
- sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
-
- }
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- i++;
- }
-
- return ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+ cp->count = 0;
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
}
-void
-ccw_device_sense_pgid_start(struct ccw_device *cdev)
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
{
- int ret;
-
- /* Set a timeout of 60s */
- ccw_device_set_timeout(cdev, 60*HZ);
-
- cdev->private->state = DEV_STATE_SENSE_PGID;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid));
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Adjust lpm. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+ if (!req->lpm)
+ goto out_nopath;
+ nop_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/*
- * Called from interrupt context to check if a valid answer
- * to Sense Path Group ID was received.
+ * Adjust NOOP I/O status.
*/
-static int
-__ccw_device_check_sense_pgid(struct ccw_device *cdev)
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+ struct irb *irb, enum io_status status)
{
- struct subchannel *sch;
- struct irb *irb;
- int i;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Sense PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
- }
- return -ETIME;
- }
- if (irb->esw.esw0.erw.cons &&
- (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
- /*
- * If the device doesn't support the Sense Path Group ID
- * command further retries wouldn't help ...
- */
- return -EOPNOTSUPP;
- }
- if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
- "lpum %02X, cnt %02d, sns : "
- "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.sublog.lpum,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- u8 lpm;
-
- lpm = to_io_private(sch)->orb.cmd.lpm;
- CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, lpm);
- return -EACCES;
- }
- i = 8 - ffs(cdev->private->imask);
- if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
- "is reserved by someone else\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no);
- return -EUSERS;
- }
- return 0;
+ /* Only subchannel status might indicate a path error. */
+ if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+ return IO_DONE;
+ return status;
}
/*
- * Got interrupt for Sense Path Group ID.
+ * Process NOOP request result for a single path.
*/
-void
-ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
-
- irb = (struct irb *) __LC_IRB;
-
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw)) {
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
- }
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_sense_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
- case -EOPNOTSUPP: /* Sense Path Group ID not supported */
- ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
- break;
- case -ETIME: /* Sense path group id stopped by timeout. */
- ccw_device_sense_pgid_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- /* Fall through. */
- case 0: /* Sense Path Group ID successful. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- /* Fall through. */
- case -EAGAIN: /* Try again. */
- ret = __ccw_device_sense_pgid_start(cdev);
- if (ret != 0 && ret != -EBUSY)
- ccw_device_sense_pgid_done(cdev, ret);
- break;
- case -EUSERS: /* device is reserved for someone else. */
- ccw_device_sense_pgid_done(cdev, -EUSERS);
- break;
- }
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ if (rc == 0)
+ sch->vpm |= req->lpm;
+ else if (rc != -EACCES)
+ goto err;
+ req->lpm >>= 1;
+ nop_do(cdev);
+ return;
+
+err:
+ verify_done(cdev, rc);
}
/*
- * Path Group ID helper function.
+ * Create channel program to perform SET PGID on a single path.
*/
-static int
-__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
-
- /* Setup sense path group id channel program. */
- cdev->private->pgid[0].inf.fc = func;
- ccw = cdev->private->iccws;
- if (cdev->private->flags.pgid_single)
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
- else
- cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
- ccw->cmd_code = CCW_CMD_SET_PGID;
- ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
- ccw->count = sizeof (struct pgid);
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
- }
- /* PGID command failed on this path. */
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+ struct pgid *pgid = &cdev->private->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+ cp->cda = (u32) (addr_t) pgid;
+ cp->count = sizeof(*pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
}
/*
- * Helper function to send a nop ccw down a path.
+ * Perform establish/resign SET PGID on a single path.
*/
-static int __ccw_device_do_nop(struct ccw_device *cdev)
+static void spid_do(struct ccw_device *cdev)
{
- struct subchannel *sch;
- struct ccw1 *ccw;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
-
- /* Setup nop channel program. */
- ccw = cdev->private->iccws;
- ccw->cmd_code = CCW_CMD_NOOP;
- ccw->cda = 0;
- ccw->count = 0;
- ccw->flags = CCW_FLAG_SLI;
-
- /* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- /* Try multiple times. */
- ret = -EACCES;
- if (cdev->private->iretry > 0) {
- cdev->private->iretry--;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
- ret = cio_start (sch, cdev->private->iccws,
- cdev->private->imask);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if ((ret == 0) || (ret == -EBUSY))
- return ret;
- }
- /* nop command failed on this path. */
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
- "0.%x.%04x, lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ /* Use next available path that is not already in correct state. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
+ if (!req->lpm)
+ goto out_nopath;
+ /* Channel program setup. */
+ if (req->lpm & sch->opm)
+ fn = SPID_FUNC_ESTABLISH;
+ else
+ fn = SPID_FUNC_RESIGN;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
+static void verify_start(struct ccw_device *cdev);
/*
- * Called from interrupt context to check if a valid answer
- * to Set Path Group ID was received.
+ * Process SET PGID request result for a single path.
*/
-static int
-__ccw_device_check_pgid(struct ccw_device *cdev)
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry Set PGID if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm & sch->opm;
+ break;
+ case -EACCES:
+ break;
+ case -EOPNOTSUPP:
+ if (cdev->private->flags.mpath) {
+ /* Try without multipathing. */
+ cdev->private->flags.mpath = 0;
+ goto out_restart;
}
- return -ETIME;
+ /* Try without pathgrouping. */
+ cdev->private->flags.pgroup = 0;
+ goto out_restart;
+ default:
+ goto err;
}
- if (irb->esw.esw0.erw.cons) {
- if (irb->ecw[0] & SNS0_CMD_REJECT)
- return -EOPNOTSUPP;
- /* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
- "cnt %02d, "
- "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- irb->esw.esw0.erw.scnt,
- irb->ecw[0], irb->ecw[1],
- irb->ecw[2], irb->ecw[3],
- irb->ecw[4], irb->ecw[5],
- irb->ecw[6], irb->ecw[7]);
- return -EAGAIN;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
- }
- return 0;
+ req->lpm >>= 1;
+ spid_do(cdev);
+ return;
+
+out_restart:
+ verify_start(cdev);
+ return;
+err:
+ verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->callback = spid_callback;
+ spid_do(cdev);
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+ return memcmp((char *) p1 + 1, (char *) p2 + 1,
+ sizeof(struct pgid) - 1);
}
/*
- * Called from interrupt context to check the path status after a nop has
- * been send.
+ * Determine pathgroup state from PGID data.
*/
-static int __ccw_device_check_nop(struct ccw_device *cdev)
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, int *reserved, int *reset)
{
- struct subchannel *sch;
- struct irb *irb;
-
- sch = to_subchannel(cdev->dev.parent);
- irb = &cdev->private->irb;
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
- /* Retry NOP if requested. */
- if (cdev->private->flags.intretry) {
- cdev->private->flags.intretry = 0;
- return -EAGAIN;
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+
+ *mismatch = 0;
+ *reserved = 0;
+ *reset = 0;
+ for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+ *reserved = 1;
+ if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
+ /* A PGID was reset. */
+ *reset = 1;
+ continue;
}
- return -ETIME;
- }
- if (irb->scsw.cmd.cc == 3) {
- CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
- " lpm %02X, became 'not operational'\n",
- cdev->private->dev_id.devno, sch->schid.ssid,
- sch->schid.sch_no, cdev->private->imask);
- return -EACCES;
+ if (!first) {
+ first = pgid;
+ continue;
+ }
+ if (pgid_cmp(pgid, first) != 0)
+ *mismatch = 1;
}
- return 0;
+ if (!first)
+ first = &channel_subsystems[0]->global_pgid;
+ *p = first;
}
-static void
-__ccw_device_verify_start(struct ccw_device *cdev)
+static u8 pgid_to_vpm(struct ccw_device *cdev)
{
- struct subchannel *sch;
- __u8 func;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- /* Repeat for all paths. */
- for (; cdev->private->imask; cdev->private->imask >>= 1,
- cdev->private->iretry = 5) {
- if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
- /* Path not available, try next. */
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int i;
+ int lpm;
+ u8 vpm = 0;
+
+ /* Set VPM bits for paths which are already in the target state. */
+ for (i = 0; i < 8; i++) {
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- if (cdev->private->options.pgroup) {
- if (sch->opm & cdev->private->imask)
- func = SPID_FUNC_ESTABLISH;
- else
- func = SPID_FUNC_RESIGN;
- ret = __ccw_device_do_pgid(cdev, func);
- } else
- ret = __ccw_device_do_nop(cdev);
- /* We expect an interrupt in case of success or busy
- * indication. */
- if (ret == 0 || ret == -EBUSY)
- return;
- /* Permanent path failure, try next. */
+ pgid = &cdev->private->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+ } else {
+ if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+ continue;
+ }
+ if (cdev->private->flags.mpath) {
+ if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+ continue;
+ } else {
+ if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+ continue;
+ }
+ vpm |= lpm;
}
- /* Done with all paths. */
- ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
+
+ return vpm;
}
-
-/*
- * Got interrupt for Set Path Group ID.
- */
-void
-ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ int i;
- irb = (struct irb *) __LC_IRB;
+ for (i = 0; i < 8; i++)
+ memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_verify_start(cdev);
- return;
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+ int reserved = 0;
+ int reset = 0;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+ if (reserved)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+ else {
+ sch->vpm = pgid_to_vpm(cdev);
+ pgid_fill(cdev, pgid);
}
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- if (cdev->private->options.pgroup)
- ret = __ccw_device_check_pgid(cdev);
- else
- ret = __ccw_device_check_nop(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
-
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
+out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+ "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
+ cdev->private->pgid_valid_mask, sch->vpm, mismatch,
+ reserved, reset);
+ switch (rc) {
case 0:
- /* Path verification ccw finished successfully, update lpm. */
- sch->vpm |= sch->opm & cdev->private->imask;
- /* Go on with next path. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ /* Anything left to do? */
+ if (sch->vpm == sch->schib.pmcw.pam) {
+ verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+ return;
+ }
+ /* Perform path-grouping. */
+ spid_start(cdev);
break;
case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Set Path Group ID.
- */
- if (cdev->private->flags.pgid_single)
- cdev->private->options.pgroup = 0;
- else
- cdev->private->flags.pgid_single = 1;
- /* Retry */
- sch->vpm = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_verify_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_verify_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_verify_start(cdev);
+ /* Path-grouping not supported. */
+ cdev->private->flags.pgroup = 0;
+ cdev->private->flags.mpath = 0;
+ verify_start(cdev);
break;
+ default:
+ verify_done(cdev, rc);
}
}
-void
-ccw_device_verify_start(struct ccw_device *cdev)
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+ cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Adjust lpm if paths are not set in pam. */
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+ if (!req->lpm)
+ goto out_nopath;
+ snid_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+}
- cdev->private->flags.pgid_single = 0;
- cdev->private->imask = 0x80;
- cdev->private->iretry = 5;
+/*
+ * Process SENSE PGID request result for a single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (rc == 0)
+ cdev->private->pgid_valid_mask |= req->lpm;
+ else if (rc != -EACCES)
+ goto err;
+ req->lpm >>= 1;
+ snid_do(cdev);
+ return;
+
+err:
+ snid_done(cdev, rc);
+}
- /* Start with empty vpm. */
- sch->vpm = 0;
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw_dev_id *devid = &cdev->private->dev_id;
- /* Get current pam. */
- if (cio_update_schib(sch)) {
- ccw_device_verify_done(cdev, -ENODEV);
- return;
+ sch->vpm = 0;
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ if (cdev->private->flags.pgroup) {
+ CIO_TRACE_EVENT(4, "snid");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->callback = snid_callback;
+ snid_do(cdev);
+ } else {
+ CIO_TRACE_EVENT(4, "nop");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->filter = nop_filter;
+ req->callback = nop_callback;
+ nop_do(cdev);
}
- /* After 60s path verification is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
- __ccw_device_verify_start(cdev);
}
-static void
-__ccw_device_disband_start(struct ccw_device *cdev)
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
{
- struct subchannel *sch;
- int ret;
-
- sch = to_subchannel(cdev->dev.parent);
- while (cdev->private->imask != 0) {
- if (sch->lpm & cdev->private->imask) {
- ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
- if (ret == 0)
- return;
- }
- cdev->private->iretry = 5;
- cdev->private->imask >>= 1;
- }
- ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+ CIO_TRACE_EVENT(4, "vrfy");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ /*
+ * Initialize pathgroup and multipath state with target values.
+ * They may change in the course of path verification.
+ */
+ cdev->private->flags.pgroup = cdev->private->options.pgroup;
+ cdev->private->flags.mpath = cdev->private->options.mpath;
+ cdev->private->flags.doverify = 0;
+ verify_start(cdev);
}
/*
- * Got interrupt for Unset Path Group ID.
+ * Process disband SET PGID request result.
*/
-void
-ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
- struct subchannel *sch;
- struct irb *irb;
- int ret;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ cdev->private->flags.mpath = 0;
+ if (sch->config.mp) {
+ sch->config.mp = 0;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+ rc);
+ ccw_device_disband_done(cdev, rc);
+}
- irb = (struct irb *) __LC_IRB;
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_TRACE_EVENT(4, "disb");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->callback = disband_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
- if (irb->scsw.cmd.stctl ==
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (__ccw_device_should_retry(&irb->scsw))
- __ccw_device_disband_start(cdev);
- return;
- }
- if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
- return;
- sch = to_subchannel(cdev->dev.parent);
- ret = __ccw_device_check_pgid(cdev);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- switch (ret) {
- /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
- case 0: /* disband successful. */
- ccw_device_disband_done(cdev, ret);
- break;
- case -EOPNOTSUPP:
- /*
- * One of those strange devices which claim to be able
- * to do multipathing but not for Unset Path Group ID.
- */
- cdev->private->flags.pgid_single = 1;
- /* fall through. */
- case -EAGAIN: /* Try again. */
- __ccw_device_disband_start(cdev);
- break;
- case -ETIME: /* Set path group id stopped by timeout. */
- ccw_device_disband_done(cdev, -ETIME);
- break;
- case -EACCES: /* channel is not operational. */
- cdev->private->imask >>= 1;
- cdev->private->iretry = 5;
- __ccw_device_disband_start(cdev);
- break;
- }
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+ cp[0].count = 32;
+ cp[0].flags = CCW_FLAG_CC;
+ cp[1].cmd_code = CCW_CMD_RELEASE;
+ cp[1].cda = (u32) (addr_t) buf2;
+ cp[1].count = 32;
+ cp[1].flags = 0;
+ req->cp = cp;
}
-void
-ccw_device_disband_start(struct ccw_device *cdev)
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
- /* After 60s disbanding is considered to have failed. */
- ccw_device_set_timeout(cdev, 60*HZ);
+ ccw_device_stlck_done(cdev, data, rc);
+}
- cdev->private->flags.pgid_single = 0;
- cdev->private->iretry = 5;
- cdev->private->imask = 0x80;
- __ccw_device_disband_start(cdev);
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+ void *buf2)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ CIO_TRACE_EVENT(4, "stlck");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->data = data;
+ req->callback = stlck_callback;
+ stlck_build_cp(cdev, buf1, buf2);
+ ccw_request_start(cdev);
}
+
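All three per-path routines above (nop_do, spid_do, snid_do) walk the 8-bit path mask the same way: start at 0x80, let lpm_adjust() skip bits not present in the allowed mask, run one I/O on the selected path, then shift right and repeat until the mask is exhausted. The stand-alone sketch below illustrates that walk; next_path() is a hypothetical helper approximating how lpm_adjust() is used here, not the kernel's implementation.

/* Illustrative path-mask walk; next_path() is a hypothetical stand-in. */
#include <stdio.h>

static unsigned char next_path(unsigned char lpm, unsigned char mask)
{
	/* Return the highest candidate bit at or below lpm that is also set
	 * in mask, or 0 when no path is left. */
	while (lpm && !(lpm & mask))
		lpm >>= 1;
	return lpm & mask;
}

int main(void)
{
	unsigned char pam = 0xd0;	/* example: paths 0, 1 and 3 available */
	unsigned char lpm;

	for (lpm = 0x80; (lpm = next_path(lpm, pam)) != 0; lpm >>= 1)
		printf("trying path mask %02x\n", lpm);
	return 0;
}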
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee241..66d8066ef22 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
- /* Reset internal retry indication. */
- cdev->private->flags.intretry = 0;
-
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20..d72ae4c93af 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H
+#include <linux/types.h>
#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include "css.h"
/*
* command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
#define MAX_CIWS 8
/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+ IO_DONE,
+ IO_RUNNING,
+ IO_STATUS_ERROR,
+ IO_PATH_ERROR,
+ IO_REJECTED,
+ IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ */
+struct ccw_request {
+ struct ccw1 *cp;
+ unsigned long timeout;
+ u16 maxretries;
+ u8 lpm;
+ int (*check)(struct ccw_device *, void *);
+ enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+ enum io_status);
+ void (*callback)(struct ccw_device *, void *, int);
+ void *data;
+ /* These fields are used internally. */
+ u16 mask;
+ u16 retries;
+ int drc;
+ int cancel:1;
+ int done:1;
+} __attribute__((packed));
+
+/*
* sense-id response buffer layout
*/
struct senseid {
@@ -82,32 +131,43 @@ struct senseid {
struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed, aligned(4)));
+enum cdev_todo {
+ CDEV_TODO_NOTHING,
+ CDEV_TODO_ENABLE_CMF,
+ CDEV_TODO_REBIND,
+ CDEV_TODO_REGISTER,
+ CDEV_TODO_UNREG,
+ CDEV_TODO_UNREG_EVAL,
+};
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
int state; /* device state */
atomic_t onoff;
- unsigned long registered;
struct ccw_dev_id dev_id; /* device id */
struct subchannel_id schid; /* subchannel number */
- u8 imask; /* lpm mask for SNID/SID/SPGID */
- int iretry; /* retry counter SNID/SID/SPGID */
+ struct ccw_request req; /* internal I/O request */
+ int iretry;
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
unsigned int pgroup:1; /* do path grouping */
unsigned int force:1; /* allow forced online */
+ unsigned int mpath:1; /* do multipathing */
} __attribute__ ((packed)) options;
struct {
- unsigned int pgid_single:1; /* use single path for Set PGID */
unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
- unsigned int intretry:1; /* retry internal operation */
unsigned int resuming:1; /* recognition while resume */
+ unsigned int pgroup:1; /* pathgroup is set up */
+ unsigned int mpath:1; /* multipathing is set up */
+ unsigned int initialized:1; /* set if initial reference held */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
@@ -115,7 +175,8 @@ struct ccw_device_private {
struct senseid senseid; /* SenseID info */
struct pgid pgid[8]; /* path group IDs per chpid*/
struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
- struct work_struct kick_work;
+ struct work_struct todo_work;
+ enum cdev_todo todo;
wait_queue_head_t wait_q;
struct timer_list timer;
void *cmb; /* measurement information */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1294876bf7b..20836eff88c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -102,6 +102,7 @@ static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
@@ -282,6 +283,7 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
+ * @special: Special Bit
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
@@ -289,7 +291,8 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+ unsigned int special)
{
typedef struct { char _[length]; } msgblock;
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
@@ -299,6 +302,9 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+ if (special == 1)
+ reg0 |= 0x400000UL;
+
asm volatile (
"0: .long 0xb2ad0042\n" /* DQAP */
" brc 2,0b"
@@ -312,13 +318,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
struct ap_queue_status status;
- status = __ap_send(qid, psmid, msg, length);
+ status = __ap_send(qid, psmid, msg, length, 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return -EINVAL;
default: /* Device is gone. */
return -ENODEV;
}
@@ -1008,7 +1016,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
}
status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
- msg, sizeof(msg));
+ msg, sizeof(msg), 0);
if (status.response_code != AP_RESPONSE_NORMAL) {
rc = -ENODEV;
goto out_free;
@@ -1163,16 +1171,19 @@ ap_config_timeout(unsigned long ptr)
static inline void ap_schedule_poll_timer(void)
{
ktime_t hr_time;
+
+ spin_lock_bh(&ap_poll_timer_lock);
if (ap_using_interrupts() || ap_suspend_flag)
- return;
+ goto out;
if (hrtimer_is_queued(&ap_poll_timer))
- return;
+ goto out;
if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
hr_time = ktime_set(0, poll_timeout);
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
- return;
+out:
+ spin_unlock_bh(&ap_poll_timer_lock);
}
/**
@@ -1243,7 +1254,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
/* Start the next request on the queue. */
ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length);
+ ap_msg->message, ap_msg->length, ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
atomic_inc(&ap_poll_requests);
@@ -1261,6 +1272,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
*flags |= 2;
break;
case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
return -EINVAL;
default:
return -ENODEV;
@@ -1302,7 +1314,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
if (list_empty(&ap_dev->requestq) &&
ap_dev->queue_count < ap_dev->queue_depth) {
status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length);
+ ap_msg->message, ap_msg->length,
+ ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
list_add_tail(&ap_msg->list, &ap_dev->pendingq);
@@ -1317,6 +1330,7 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
ap_dev->requestq_count++;
ap_dev->total_request_count++;
return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
case AP_RESPONSE_MESSAGE_TOO_BIG:
ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
return -EINVAL;
@@ -1658,6 +1672,7 @@ int __init ap_module_init(void)
*/
if (MACHINE_IS_VM)
poll_timeout = 1500000;
+ spin_lock_init(&ap_poll_timer_lock);
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index a3536224180..4785d07cd44 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -87,6 +87,7 @@ struct ap_queue_status {
#define AP_RESPONSE_INDEX_TOO_BIG 0x11
#define AP_RESPONSE_NO_FIRST_PART 0x13
#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
/*
* Known device types
@@ -96,8 +97,8 @@ struct ap_queue_status {
#define AP_DEVICE_TYPE_PCIXCC 5
#define AP_DEVICE_TYPE_CEX2A 6
#define AP_DEVICE_TYPE_CEX2C 7
-#define AP_DEVICE_TYPE_CEX2A2 8
-#define AP_DEVICE_TYPE_CEX2C2 9
+#define AP_DEVICE_TYPE_CEX3A 8
+#define AP_DEVICE_TYPE_CEX3C 9
/*
* AP reset flag states
@@ -161,12 +162,25 @@ struct ap_message {
size_t length; /* Message length. */
void *private; /* ap driver private pointer. */
+ unsigned int special:1; /* Used for special commands. */
};
#define AP_DEVICE(dt) \
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before using it. Otherwise this might result in
+ * unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+ ap_msg->psmid = 0;
+ ap_msg->length = 0;
+ ap_msg->special = 0;
+}
+
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
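A hypothetical caller fragment (not from this patch) showing the intended order of operations: ap_init_message() first, then allocate and fill the message buffer, which is the same pattern the zcrypt_* changes elsewhere in this commit apply. The buffer size and psmid value are placeholders.

/* Hypothetical caller fragment illustrating ap_init_message() usage. */
#include <linux/errno.h>
#include <linux/slab.h>
#include "ap_bus.h"

static int example_build_request(struct ap_message *ap_msg, size_t size)
{
	ap_init_message(ap_msg);	/* clears psmid, length and special */
	ap_msg->message = kmalloc(size, GFP_KERNEL);
	if (!ap_msg->message)
		return -ENOMEM;
	ap_msg->length = size;
	ap_msg->psmid = 0x0102030405060708ULL;	/* placeholder identifier */
	/* ap_msg->special is left 0; set it only for special commands. */
	return 0;
}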
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 65b6a96afe6..0d4d18bdd45 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -299,9 +299,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
*/
static int zcrypt_open(struct inode *inode, struct file *filp)
{
- lock_kernel();
atomic_inc(&zcrypt_open_count);
- unlock_kernel();
return 0;
}
@@ -1009,6 +1007,10 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
zcrypt_count_type(ZCRYPT_CEX2C));
len += sprintf(resp_buff + len, "CEX2A count: %d\n",
zcrypt_count_type(ZCRYPT_CEX2A));
+ len += sprintf(resp_buff + len, "CEX3C count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX3C));
+ len += sprintf(resp_buff + len, "CEX3A count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX3A));
len += sprintf(resp_buff + len, "requestq count: %d\n",
zcrypt_requestq_count());
len += sprintf(resp_buff + len, "pendingq count: %d\n",
@@ -1017,7 +1019,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
atomic_read(&zcrypt_open_count));
zcrypt_status_mask(workarea);
len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
+ "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
resp_buff+len, workarea, AP_DEVICES);
zcrypt_qdepth_mask(workarea);
len += sprinthx("Waiting work element counts",
@@ -1095,8 +1097,9 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
* '0' for no device, '1' for PCICA, '2' for PCICC,
* '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
* '5' for CEX2C and '6' for CEX2A'
+ * '7' for CEX3C and '8' for CEX3A
*/
- if (*ptr >= '0' && *ptr <= '6')
+ if (*ptr >= '0' && *ptr <= '8')
j++;
else if (*ptr == 'd' || *ptr == 'D')
zcrypt_disable_card(j++);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 1d1ec74dadb..8e7ffbf2466 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -71,6 +71,8 @@ struct ica_z90_status {
#define ZCRYPT_PCIXCC_MCL3 4
#define ZCRYPT_CEX2C 5
#define ZCRYPT_CEX2A 6
+#define ZCRYPT_CEX3C 7
+#define ZCRYPT_CEX3A 8
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 326ea08f67c..c6fb0aa8950 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -39,17 +39,24 @@
#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE
#define CEX2A_SPEED_RATING 970
+#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE
+
#define CEX2A_CLEANUP_TIME (15*HZ)
+#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
static struct ap_device_id zcrypt_cex2a_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
{ /* end of list */ },
};
@@ -298,6 +305,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -335,6 +343,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -373,31 +382,45 @@ static struct zcrypt_ops zcrypt_cex2a_ops = {
*/
static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev;
- int rc;
-
- zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zdev)
- return -ENOMEM;
- zdev->ap_dev = ap_dev;
- zdev->ops = &zcrypt_cex2a_ops;
- zdev->online = 1;
- zdev->user_space_type = ZCRYPT_CEX2A;
- zdev->type_string = "CEX2A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->short_crt = 1;
- zdev->speed_rating = CEX2A_SPEED_RATING;
- ap_dev->reply = &zdev->reply;
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
- if (rc)
- goto out_free;
- return 0;
-
-out_free:
- ap_dev->private = NULL;
- zcrypt_device_free(zdev);
+ struct zcrypt_device *zdev = NULL;
+ int rc = 0;
+
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_CEX2A:
+ zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->user_space_type = ZCRYPT_CEX2A;
+ zdev->type_string = "CEX2A";
+ zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX2A_SPEED_RATING;
+ break;
+ case AP_DEVICE_TYPE_CEX3A:
+ zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->user_space_type = ZCRYPT_CEX3A;
+ zdev->type_string = "CEX3A";
+ zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX3A_SPEED_RATING;
+ break;
+ }
+ if (zdev != NULL) {
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_cex2a_ops;
+ zdev->online = 1;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ }
+ if (rc) {
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ }
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 17ba81b58c7..e78df3671ca 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -281,6 +281,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -318,6 +319,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f4b0c479543..a23726a0735 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -483,6 +483,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -521,6 +522,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
struct completion work;
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 5677b40e4ac..79c120578e6 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -43,10 +43,13 @@
#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE
-#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
+#define PCIXCC_MCL2_SPEED_RATING 7870
#define PCIXCC_MCL3_SPEED_RATING 7870
-#define CEX2C_SPEED_RATING 8540
+#define CEX2C_SPEED_RATING 7000
+#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -72,7 +75,7 @@ struct response_type {
static struct ap_device_id zcrypt_pcixcc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
{ /* end of list */ },
};
@@ -326,6 +329,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
+ if (memcmp(function_code, "US", 2) == 0)
+ ap_msg->special = 1;
+ else
+ ap_msg->special = 0;
+
/* copy data block */
if (xcRB->request_data_length &&
copy_from_user(req_data, xcRB->request_data_address,
@@ -688,6 +696,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -727,6 +736,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -766,6 +776,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -805,6 +816,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
};
int rc;
+ ap_init_message(&ap_msg);
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -972,6 +984,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
} __attribute__((packed)) *reply;
int rc, i;
+ ap_init_message(&ap_msg);
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
@@ -1016,14 +1029,15 @@ out_free:
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
{
struct zcrypt_device *zdev;
- int rc;
+ int rc = 0;
zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
zdev->online = 1;
- if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_PCIXCC:
rc = zcrypt_pcixcc_mcl(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
@@ -1041,13 +1055,25 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
}
- } else {
+ break;
+ case AP_DEVICE_TYPE_CEX2C:
zdev->user_space_type = ZCRYPT_CEX2C;
zdev->type_string = "CEX2C";
zdev->speed_rating = CEX2C_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ break;
+ case AP_DEVICE_TYPE_CEX3C:
+ zdev->user_space_type = ZCRYPT_CEX3C;
+ zdev->type_string = "CEX3C";
+ zdev->speed_rating = CEX3C_SPEED_RATING;
+ zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+ break;
+ default:
+ goto out_free;
}
+
rc = zcrypt_pcixcc_rng_supported(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 50943ff78f4..9ff47db0b2c 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -996,7 +996,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on SUPERH || H8300
+ depends on HAVE_CLK && (SUPERH || H8300)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 6498bd1fb6d..ff38dbdb5c6 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -50,7 +50,6 @@
#include <linux/list.h>
#ifdef CONFIG_SUPERH
-#include <asm/clock.h>
#include <asm/sh_bios.h>
#endif
@@ -79,22 +78,18 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
-#ifdef CONFIG_HAVE_CLK
/* Interface clock */
struct clk *iclk;
/* Data clock */
struct clk *dclk;
-#endif
+
struct list_head node;
};
struct sh_sci_priv {
spinlock_t lock;
struct list_head ports;
-
-#ifdef CONFIG_HAVE_CLK
struct notifier_block clk_nb;
-#endif
};
/* Function prototypes */
@@ -156,32 +151,6 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
-#if defined(__H8300S__)
-enum { sci_disable, sci_enable };
-
-static void h8300_sci_config(struct uart_port *port, unsigned int ctrl)
-{
- volatile unsigned char *mstpcrl = (volatile unsigned char *)MSTPCRL;
- int ch = (port->mapbase - SMR0) >> 3;
- unsigned char mask = 1 << (ch+1);
-
- if (ctrl == sci_disable)
- *mstpcrl |= mask;
- else
- *mstpcrl &= ~mask;
-}
-
-static void h8300_sci_enable(struct uart_port *port)
-{
- h8300_sci_config(port, sci_enable);
-}
-
-static void h8300_sci_disable(struct uart_port *port)
-{
- h8300_sci_config(port, sci_disable);
-}
-#endif
-
#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
@@ -733,7 +702,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
return ret;
}
-#ifdef CONFIG_HAVE_CLK
/*
* Here we define a transition notifier so that we can update all of our
* ports' baud rates when the peripheral clock changes.
@@ -751,7 +719,6 @@ static int sci_notifier(struct notifier_block *self,
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(sci_port, &priv->ports, node)
sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -778,7 +745,6 @@ static void sci_clk_disable(struct uart_port *port)
clk_disable(sci_port->dclk);
}
-#endif
static int sci_request_irq(struct sci_port *port)
{
@@ -833,8 +799,8 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
- /* Can't detect */
- return TIOCSER_TEMT;
+ unsigned short status = sci_in(port, SCxSR);
+ return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0;
}
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1077,21 +1043,10 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->port.iotype = UPIO_MEM;
sci_port->port.line = index;
sci_port->port.fifosize = 1;
-
-#if defined(__H8300H__) || defined(__H8300S__)
-#ifdef __H8300S__
- sci_port->enable = h8300_sci_enable;
- sci_port->disable = h8300_sci_disable;
-#endif
- sci_port->port.uartclk = CONFIG_CPU_CLOCK;
-#elif defined(CONFIG_HAVE_CLK)
sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
-#else
-#error "Need a valid uartclk"
-#endif
sci_port->break_timer.data = (unsigned long)sci_port;
sci_port->break_timer.function = sci_break_timer;
@@ -1106,7 +1061,6 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->type = sci_port->port.type = p->type;
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
-
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1239,14 +1193,11 @@ static int sci_remove(struct platform_device *dev)
struct sci_port *p;
unsigned long flags;
-#ifdef CONFIG_HAVE_CLK
cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(p, &priv->ports, node)
uart_remove_one_port(&sci_uart_driver, &p->port);
-
spin_unlock_irqrestore(&priv->lock, flags);
kfree(priv);
@@ -1307,10 +1258,8 @@ static int __devinit sci_probe(struct platform_device *dev)
spin_lock_init(&priv->lock);
platform_set_drvdata(dev, priv);
-#ifdef CONFIG_HAVE_CLK
priv->clk_nb.notifier_call = sci_notifier;
cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
if (dev->id != -1) {
ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
@@ -1370,7 +1319,7 @@ static struct dev_pm_ops sci_dev_pm_ops = {
static struct platform_driver sci_driver = {
.probe = sci_probe,
- .remove = __devexit_p(sci_remove),
+ .remove = sci_remove,
.driver = {
.name = "sh-sci",
.owner = THIS_MODULE,
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 3e2fcf93b42..a32094eeb42 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -1,5 +1,5 @@
#include <linux/serial_core.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/gpio.h>
#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 6a025cefe6d..4956bf1f213 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_MAPLE) += maple/
+obj-$(CONFIG_GENERIC_GPIO) += pfc.o
obj-y += intc.o
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 559b5fe9dc0..a7e5c2e9986 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -2,6 +2,7 @@
* Shared interrupt handling code for IPR and INTC2 types of IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
+ * Copyright (C) 2009 Paul Mundt
*
* Based on intc2.c and ipr.c
*
@@ -24,6 +25,7 @@
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
+#include <linux/bitmap.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -59,6 +61,20 @@ struct intc_desc_int {
static LIST_HEAD(intc_list);
+/*
+ * The intc_irq_map provides a global map of bound IRQ vectors for a
+ * given platform. Allocation of IRQs is either static through the CPU
+ * vector map, or dynamic in the case of board mux vectors or MSI.
+ *
+ * As this is a central point for all IRQ controllers on the system,
+ * each of the available sources is mapped out here. This, combined with
+ * sparseirq, makes it quite trivial to keep the vector map tightly packed
+ * when dynamically creating IRQs, as well as tying in to otherwise
+ * unused irq_desc positions in the sparse array.
+ */
+static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
+static DEFINE_SPINLOCK(vector_lock);
+
#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
@@ -70,9 +86,7 @@ static LIST_HEAD(intc_list);
#endif
static unsigned int intc_prio_level[NR_IRQS]; /* for now */
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned long ack_handle[NR_IRQS];
-#endif
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
@@ -250,7 +264,6 @@ static int intc_set_wake(unsigned int irq, unsigned int on)
return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static void intc_mask_ack(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
@@ -282,7 +295,6 @@ static void intc_mask_ack(unsigned int irq)
}
}
}
-#endif
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
unsigned int nr_hp,
@@ -501,7 +513,6 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc,
return 0;
}
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned int __init intc_ack_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id)
@@ -533,7 +544,6 @@ static unsigned int __init intc_ack_data(struct intc_desc *desc,
return 0;
}
-#endif
static unsigned int __init intc_sense_data(struct intc_desc *desc,
struct intc_desc_int *d,
@@ -572,6 +582,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
struct intc_handle_int *hp;
unsigned int data[2], primary;
+ /*
+ * Register the IRQ position with the global IRQ map
+ */
+ set_bit(irq, intc_irq_map);
+
/* Prefer single interrupt source bitmap over other combinations:
* 1. bitmap, single interrupt source
* 2. priority, single interrupt source
@@ -641,10 +656,8 @@ static void __init intc_register_irq(struct intc_desc *desc,
/* irq should be disabled by default */
d->chip.mask(irq);
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
if (desc->ack_regs)
ack_handle[irq] = intc_ack_data(desc, d, enum_id);
-#endif
}
static unsigned int __init save_reg(struct intc_desc_int *d,
@@ -681,10 +694,8 @@ void __init register_intc_controller(struct intc_desc *desc)
d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
-
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
-#endif
+
d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
@@ -727,14 +738,12 @@ void __init register_intc_controller(struct intc_desc *desc)
d->chip.set_type = intc_set_sense;
d->chip.set_wake = intc_set_wake;
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
if (desc->ack_regs) {
for (i = 0; i < desc->nr_ack_regs; i++)
k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
d->chip.mask_ack = intc_mask_ack;
}
-#endif
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
@@ -856,5 +865,91 @@ static int __init register_intc_sysdevs(void)
return error;
}
-
device_initcall(register_intc_sysdevs);
+
+/*
+ * Dynamic IRQ allocation and deallocation
+ */
+static unsigned int create_irq_on_node(unsigned int irq_want, int node)
+{
+ unsigned int irq = 0, new;
+ unsigned long flags;
+ struct irq_desc *desc;
+
+ spin_lock_irqsave(&vector_lock, flags);
+
+ /*
+ * First try the wanted IRQ, then scan.
+ */
+ if (test_and_set_bit(irq_want, intc_irq_map)) {
+ new = find_first_zero_bit(intc_irq_map, nr_irqs);
+ if (unlikely(new == nr_irqs))
+ goto out_unlock;
+
+ desc = irq_to_desc_alloc_node(new, node);
+ if (unlikely(!desc)) {
+ pr_info("can't get irq_desc for %d\n", new);
+ goto out_unlock;
+ }
+
+ desc = move_irq_desc(desc, node);
+ __set_bit(new, intc_irq_map);
+ irq = new;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ if (irq > 0)
+ dynamic_irq_init(irq);
+
+ return irq;
+}
+
+int create_irq(void)
+{
+ int nid = cpu_to_node(smp_processor_id());
+ int irq;
+
+ irq = create_irq_on_node(NR_IRQS_LEGACY, nid);
+ if (irq == 0)
+ irq = -1;
+
+ return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+ unsigned long flags;
+
+ dynamic_irq_cleanup(irq);
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __clear_bit(irq, intc_irq_map);
+ spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+int reserve_irq_vector(unsigned int irq)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ if (test_and_set_bit(irq, intc_irq_map))
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&vector_lock, flags);
+
+ return ret;
+}
+
+void reserve_irq_legacy(void)
+{
+ unsigned long flags;
+ int i, j;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ j = find_first_bit(intc_irq_map, nr_irqs);
+ for (i = 0; i < j; i++)
+ __set_bit(i, intc_irq_map);
+ spin_unlock_irqrestore(&vector_lock, flags);
+}
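
For reference, a minimal sketch of how a board file might consume the dynamic vector map added above. Only create_irq(), destroy_irq() and the generic request_irq() interface are taken as given; the demux handler, device name and init hook are hypothetical:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical demux handler for a board-level interrupt multiplexer. */
static irqreturn_t board_mux_demux(int irq, void *dev_id)
{
	/* demultiplex secondary sources here */
	return IRQ_HANDLED;
}

static int board_mux_irq = -1;

static int __init board_setup_mux_irq(void)
{
	int irq = create_irq();		/* grab a free slot in intc_irq_map */

	if (irq < 0)
		return -ENOSPC;

	if (request_irq(irq, board_mux_demux, 0, "board-mux", NULL)) {
		destroy_irq(irq);	/* give the vector back on failure */
		return -EBUSY;
	}

	board_mux_irq = irq;
	return 0;
}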
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 93c20e135ee..4e8f57d4131 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -106,7 +106,7 @@ static void maple_dma_reset(void)
* max delay is 11
*/
ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
- ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
+ ctrl_outl(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
ctrl_outl(1, MAPLE_ENABLE);
}
@@ -258,7 +258,7 @@ static void maple_build_block(struct mapleq *mq)
maple_lastptr = maple_sendptr;
*maple_sendptr++ = (port << 16) | len | 0x80000000;
- *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
+ *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
*maple_sendptr++ =
mq->command | (to << 8) | (from << 16) | (len << 24);
while (len-- > 0)
diff --git a/arch/sh/kernel/gpio.c b/drivers/sh/pfc.c
index d22e5af699f..841ed5030c8 100644
--- a/arch/sh/kernel/gpio.c
+++ b/drivers/sh/pfc.c
@@ -7,7 +7,6 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -35,11 +34,11 @@ static unsigned long gpio_read_raw_reg(unsigned long reg,
{
switch (reg_width) {
case 8:
- return ctrl_inb(reg);
+ return __raw_readb(reg);
case 16:
- return ctrl_inw(reg);
+ return __raw_readw(reg);
case 32:
- return ctrl_inl(reg);
+ return __raw_readl(reg);
}
BUG();
@@ -52,13 +51,13 @@ static void gpio_write_raw_reg(unsigned long reg,
{
switch (reg_width) {
case 8:
- ctrl_outb(data, reg);
+ __raw_writeb(data, reg);
return;
case 16:
- ctrl_outw(data, reg);
+ __raw_writew(data, reg);
return;
case 32:
- ctrl_outl(data, reg);
+ __raw_writel(data, reg);
return;
}
@@ -72,11 +71,9 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
pos = dr->reg_width - (in_pos + 1);
-#ifdef DEBUG
- pr_info("write_bit addr = %lx, value = %ld, pos = %ld, "
- "r_width = %ld\n",
- dr->reg, !!value, pos, dr->reg_width);
-#endif
+ pr_debug("write_bit addr = %lx, value = %ld, pos = %ld, "
+ "r_width = %ld\n",
+ dr->reg, !!value, pos, dr->reg_width);
if (value)
set_bit(pos, &dr->reg_shadow);
@@ -95,11 +92,9 @@ static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
mask = (1 << field_width) - 1;
pos = reg_width - ((in_pos + 1) * field_width);
-#ifdef DEBUG
- pr_info("read_reg: addr = %lx, pos = %ld, "
- "r_width = %ld, f_width = %ld\n",
- reg, pos, reg_width, field_width);
-#endif
+ pr_debug("read_reg: addr = %lx, pos = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ reg, pos, reg_width, field_width);
data = gpio_read_raw_reg(reg, reg_width);
return (data >> pos) & mask;
@@ -114,24 +109,22 @@ static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
mask = (1 << field_width) - 1;
pos = reg_width - ((in_pos + 1) * field_width);
-#ifdef DEBUG
- pr_info("write_reg addr = %lx, value = %ld, pos = %ld, "
- "r_width = %ld, f_width = %ld\n",
- reg, value, pos, reg_width, field_width);
-#endif
+ pr_debug("write_reg addr = %lx, value = %ld, pos = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ reg, value, pos, reg_width, field_width);
mask = ~(mask << pos);
value = value << pos;
switch (reg_width) {
case 8:
- ctrl_outb((ctrl_inb(reg) & mask) | value, reg);
+ __raw_writeb((__raw_readb(reg) & mask) | value, reg);
break;
case 16:
- ctrl_outw((ctrl_inw(reg) & mask) | value, reg);
+ __raw_writew((__raw_readw(reg) & mask) | value, reg);
break;
case 32:
- ctrl_outl((ctrl_inl(reg) & mask) | value, reg);
+ __raw_writel((__raw_readl(reg) & mask) | value, reg);
break;
}
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d23983f02f..815a65012cb 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -477,7 +476,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
struct spidev_data *spidev;
int status = -ENXIO;
- lock_kernel();
mutex_lock(&device_list_lock);
list_for_each_entry(spidev, &device_list, device_entry) {
@@ -503,7 +501,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
pr_debug("spidev: nothing for minor %d\n", iminor(inode));
mutex_unlock(&device_list_lock);
- unlock_kernel();
return status;
}
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 3ad5157f989..b4b5de930cf 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -281,18 +281,34 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
- unsigned int nr_pages;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
- nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
- dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
-
- /* trigger panel update */
- lcdc_write_chan(ch, LDSM2R, 1);
-
- dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+ /*
+ * It's possible to get here without anything on the pagelist via
+ * sh_mobile_lcdc_deferred_io_touch() or via a userspace fsync()
+ * invocation. In the former case, the acceleration routines step in
+ * when using the framebuffer console, causing the workqueue to be
+ * scheduled without any dirty pages on the list.
+ *
+ * Despite this, a panel update is still needed, given that the
+ * acceleration routines do their writing through their own paths
+ * and the panel still has to be brought up to date.
+ *
+ * The fsync() and empty pagelist case could be optimized for,
+ * but we don't bother, as any application exhibiting such
+ * behaviour is fundamentally broken anyway.
+ */
+ if (!list_empty(pagelist)) {
+ unsigned int nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
+
+ /* trigger panel update */
+ dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+ lcdc_write_chan(ch, LDSM2R, 1);
+ dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+ } else
+ lcdc_write_chan(ch, LDSM2R, 1);
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 6a51edde6ea..e44fbb31bc6 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,5 +1,5 @@
/*
- * intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets)
+ * intel TCO Watchdog Driver
*
* (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
*
@@ -14,47 +14,22 @@
*
* The TCO watchdog is implemented in the following I/O controller hubs:
* (See the intel documentation on http://developer.intel.com.)
- * 82801AA (ICH) : document number 290655-003, 290677-014,
- * 82801AB (ICHO) : document number 290655-003, 290677-014,
- * 82801BA (ICH2) : document number 290687-002, 298242-027,
- * 82801BAM (ICH2-M) : document number 290687-002, 298242-027,
- * 82801CA (ICH3-S) : document number 290733-003, 290739-013,
- * 82801CAM (ICH3-M) : document number 290716-001, 290718-007,
- * 82801DB (ICH4) : document number 290744-001, 290745-025,
- * 82801DBM (ICH4-M) : document number 252337-001, 252663-008,
- * 82801E (C-ICH) : document number 273599-001, 273645-002,
- * 82801EB (ICH5) : document number 252516-001, 252517-028,
- * 82801ER (ICH5R) : document number 252516-001, 252517-028,
- * 6300ESB (6300ESB) : document number 300641-004, 300884-013,
- * 82801FB (ICH6) : document number 301473-002, 301474-026,
- * 82801FR (ICH6R) : document number 301473-002, 301474-026,
- * 82801FBM (ICH6-M) : document number 301473-002, 301474-026,
- * 82801FW (ICH6W) : document number 301473-001, 301474-026,
- * 82801FRW (ICH6RW) : document number 301473-001, 301474-026,
- * 631xESB (631xESB) : document number 313082-001, 313075-006,
- * 632xESB (632xESB) : document number 313082-001, 313075-006,
- * 82801GB (ICH7) : document number 307013-003, 307014-024,
- * 82801GR (ICH7R) : document number 307013-003, 307014-024,
- * 82801GDH (ICH7DH) : document number 307013-003, 307014-024,
- * 82801GBM (ICH7-M) : document number 307013-003, 307014-024,
- * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024,
- * 82801GU (ICH7-U) : document number 307013-003, 307014-024,
- * 82801HB (ICH8) : document number 313056-003, 313057-017,
- * 82801HR (ICH8R) : document number 313056-003, 313057-017,
- * 82801HBM (ICH8M) : document number 313056-003, 313057-017,
- * 82801HH (ICH8DH) : document number 313056-003, 313057-017,
- * 82801HO (ICH8DO) : document number 313056-003, 313057-017,
- * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017,
- * 82801IB (ICH9) : document number 316972-004, 316973-012,
- * 82801IR (ICH9R) : document number 316972-004, 316973-012,
- * 82801IH (ICH9DH) : document number 316972-004, 316973-012,
- * 82801IO (ICH9DO) : document number 316972-004, 316973-012,
- * 82801IBM (ICH9M) : document number 316972-004, 316973-012,
- * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012,
- * 82801JIB (ICH10) : document number 319973-002, 319974-002,
- * 82801JIR (ICH10R) : document number 319973-002, 319974-002,
- * 82801JD (ICH10D) : document number 319973-002, 319974-002,
- * 82801JDO (ICH10DO) : document number 319973-002, 319974-002
+ * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
+ * document number 290687-002, 298242-027: 82801BA (ICH2)
+ * document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ * document number 290744-001, 290745-025: 82801DB (ICH4)
+ * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ * document number 273599-001, 273645-002: 82801E (C-ICH)
+ * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ * document number 300641-004, 300884-013: 6300ESB
+ * document number 301473-002, 301474-026: 82801F (ICH6)
+ * document number 313082-001, 313075-006: 631xESB, 632xESB
+ * document number 307013-003, 307014-024: 82801G (ICH7)
+ * document number 313056-003, 313057-017: 82801H (ICH8)
+ * document number 316972-004, 316973-012: 82801I (ICH9)
+ * document number 319973-002, 319974-002: 82801J (ICH10)
+ * document number 322169-001, 322170-001: 5 Series, 3400 Series (PCH)
*/
/*
@@ -122,6 +97,9 @@ enum iTCO_chipsets {
TCO_ICH10R, /* ICH10R */
TCO_ICH10D, /* ICH10D */
TCO_ICH10DO, /* ICH10DO */
+ TCO_PCH, /* PCH Desktop Full Featured */
+ TCO_PCHM, /* PCH Mobile Full Featured */
+ TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
};
static struct {
@@ -162,6 +140,9 @@ static struct {
{"ICH10R", 2},
{"ICH10D", 2},
{"ICH10DO", 2},
+ {"PCH Desktop Full Featured", 2},
+ {"PCH Mobile Full Featured", 2},
+ {"PCH Mobile SFF Full Featured", 2},
{NULL, 0}
};
@@ -230,6 +211,9 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
{ ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
{ ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
{ ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
+ { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
+ { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
+ { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index b57ac6b4914..85b93e15d01 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -36,6 +36,7 @@
#include <linux/clk.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/cpufreq.h>
#include <mach/map.h>
@@ -142,9 +143,14 @@ static void s3c2410wdt_start(void)
spin_unlock(&wdt_lock);
}
+static inline int s3c2410wdt_is_running(void)
+{
+ return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
+}
+
static int s3c2410wdt_set_heartbeat(int timeout)
{
- unsigned int freq = clk_get_rate(wdt_clock);
+ unsigned long freq = clk_get_rate(wdt_clock);
unsigned int count;
unsigned int divisor = 1;
unsigned long wtcon;
@@ -155,7 +161,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
freq /= 128;
count = timeout * freq;
- DBG("%s: count=%d, timeout=%d, freq=%d\n",
+ DBG("%s: count=%d, timeout=%d, freq=%lu\n",
__func__, count, timeout, freq);
/* if the count is bigger than the watchdog register,
@@ -324,6 +330,73 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
s3c2410wdt_keepalive();
return IRQ_HANDLED;
}
+
+
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ int ret;
+
+ if (!s3c2410wdt_is_running())
+ goto done;
+
+ if (val == CPUFREQ_PRECHANGE) {
+ /* To ensure that over the change we don't cause the
+ * watchdog to trigger, we perform a keep-alive if
+ * the watchdog is running.
+ */
+
+ s3c2410wdt_keepalive();
+ } else if (val == CPUFREQ_POSTCHANGE) {
+ s3c2410wdt_stop();
+
+ ret = s3c2410wdt_set_heartbeat(tmr_margin);
+
+ if (ret >= 0)
+ s3c2410wdt_start();
+ else
+ goto err;
+ }
+
+done:
+ return 0;
+
+ err:
+ dev_err(wdt_dev, "cannot set new value for timeout %d\n", tmr_margin);
+ return ret;
+}
+
+static struct notifier_block s3c2410wdt_cpufreq_transition_nb = {
+ .notifier_call = s3c2410wdt_cpufreq_transition,
+};
+
+static inline int s3c2410wdt_cpufreq_register(void)
+{
+ return cpufreq_register_notifier(&s3c2410wdt_cpufreq_transition_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c2410wdt_cpufreq_deregister(void)
+{
+ cpufreq_unregister_notifier(&s3c2410wdt_cpufreq_transition_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410wdt_cpufreq_register(void)
+{
+ return 0;
+}
+
+static inline void s3c2410wdt_cpufreq_deregister(void)
+{
+}
+#endif
+
+
+
/* device interface */
static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
@@ -387,6 +460,11 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
clk_enable(wdt_clock);
+ if (s3c2410wdt_cpufreq_register() < 0) {
+ printk(KERN_ERR PFX "failed to register cpufreq\n");
+ goto err_clk;
+ }
+
/* see if we can actually set the requested timer margin, and if
* not, try the default value */
@@ -407,7 +485,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
WATCHDOG_MINOR, ret);
- goto err_clk;
+ goto err_cpufreq;
}
if (tmr_atboot && started == 0) {
@@ -432,6 +510,9 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
return 0;
+ err_cpufreq:
+ s3c2410wdt_cpufreq_deregister();
+
err_clk:
clk_disable(wdt_clock);
clk_put(wdt_clock);
@@ -451,6 +532,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
static int __devexit s3c2410wdt_remove(struct platform_device *dev)
{
+ s3c2410wdt_cpufreq_deregister();
+
release_resource(wdt_mem);
kfree(wdt_mem);
wdt_mem = NULL;
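
The same PRECHANGE/POSTCHANGE split generalizes to any driver whose timing is derived from a scaled clock. A hedged sketch of the bare notifier skeleton follows; the my_driver_* names and the recalc hook are placeholders, only the cpufreq transition-notifier API itself is assumed:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Placeholder: reprogram whatever dividers/counters depend on the clock. */
static void my_driver_recalc_timing(void)
{
}

static int my_driver_cpufreq_transition(struct notifier_block *nb,
					unsigned long val, void *data)
{
	if (val == CPUFREQ_PRECHANGE) {
		/* keep the hardware alive across the frequency switch */
	} else if (val == CPUFREQ_POSTCHANGE) {
		/* the clock rate has changed: recompute timing */
		my_driver_recalc_timing();
	}

	return 0;
}

static struct notifier_block my_driver_cpufreq_nb = {
	.notifier_call = my_driver_cpufreq_transition,
};

static int my_driver_cpufreq_register(void)
{
	return cpufreq_register_notifier(&my_driver_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}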
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 7c5ab6330dd..6a9e30c041d 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
hashes.o tail_conversion.o journal.o resize.o \
- item_ops.o ioctl.o procfs.o xattr.o
+ item_ops.o ioctl.o procfs.o xattr.o lock.o
ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index e716161ab32..68549570718 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1249,14 +1249,18 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, block);
+ reiserfs_write_lock(sb);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
"reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
PROC_INFO_INC(sb, scan_bitmap.wait);
+ reiserfs_write_unlock(sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(sb);
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(atomic_read(&bh->b_count) == 0);
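
The unlock/bread/relock dance above recurs throughout this series; folded into a helper it would look roughly like the sketch below. reiserfs_bread_relaxed() is a hypothetical name, and only sb_bread() plus the reiserfs write-lock API introduced by this series are assumed:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/reiserfs_fs.h>

/* Hypothetical helper: read a block with the per-superblock write lock
 * relaxed, since sb_bread() may sleep on disk I/O. */
static struct buffer_head *reiserfs_bread_relaxed(struct super_block *sb,
						  b_blocknr_t block)
{
	struct buffer_head *bh;

	reiserfs_write_unlock(sb);
	bh = sb_bread(sb, block);
	reiserfs_write_lock(sb);

	return bh;
}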
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 6d2668fdc38..c094f58c744 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -20,7 +20,7 @@ const struct file_operations reiserfs_dir_operations = {
.read = generic_read_dir,
.readdir = reiserfs_readdir,
.fsync = reiserfs_dir_fsync,
- .ioctl = reiserfs_ioctl,
+ .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
#endif
@@ -174,14 +174,22 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
// user space buffer is swapped out. At that time
// entry can move to somewhere else
memcpy(local_buf, d_name, d_reclen);
+
+ /*
+ * Since filldir might sleep, we can release
+ * the write lock here for other waiters
+ */
+ reiserfs_write_unlock(inode->i_sb);
if (filldir
(dirent, local_buf, d_reclen, d_off, d_ino,
DT_UNKNOWN) < 0) {
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
goto end;
}
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 128d3f7c8aa..60c08044066 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -21,14 +21,6 @@
#include <linux/buffer_head.h>
#include <linux/kernel.h>
-#ifdef CONFIG_REISERFS_CHECK
-
-struct tree_balance *cur_tb = NULL; /* detects whether more than one
- copy of tb exists as a means
- of checking whether schedule
- is interrupting do_balance */
-#endif
-
static inline void buffer_info_init_left(struct tree_balance *tb,
struct buffer_info *bi)
{
@@ -1840,11 +1832,12 @@ static int check_before_balancing(struct tree_balance *tb)
{
int retval = 0;
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
"occurred based on cur_tb not being null at "
"this point in code. do_balance cannot properly "
- "handle schedule occurring while it runs.");
+ "handle concurrent tree accesses on a same "
+ "mount point.");
}
/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
@@ -1986,7 +1979,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
"check");*/
RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
- cur_tb = tb;
+ REISERFS_SB(tb->tb_sb)->cur_tb = tb;
#endif
}
@@ -1996,7 +1989,7 @@ static inline void do_balance_completed(struct tree_balance *tb)
#ifdef CONFIG_REISERFS_CHECK
check_leaf_level(tb);
check_internal_levels(tb);
- cur_tb = NULL;
+ REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif
/* reiserfs_free_block is no longer schedule safe. So, we need to
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9f436668b7f..da2dba082e2 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -284,7 +284,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
const struct file_operations reiserfs_file_operations = {
.read = do_sync_read,
.write = reiserfs_file_write,
- .ioctl = reiserfs_ioctl,
+ .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
#endif
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 5e5a4e6fbaf..d2f31330dca 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -563,9 +563,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
return needed_nodes;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Set parameters for balancing.
* Performs write of results of analysis of balancing into structure tb,
@@ -1022,7 +1019,11 @@ static int get_far_parent(struct tree_balance *tb,
/* Check whether the common parent is locked. */
if (buffer_locked(*pcom_father)) {
+
+ /* Release the write lock while the buffer is busy */
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(*pcom_father);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb)) {
brelse(*pcom_father);
return REPEAT_SEARCH;
@@ -1927,7 +1928,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -1965,7 +1968,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
FL[h]);
son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2003,7 +2008,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
child_position =
(bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2278,7 +2285,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
REPEAT_SEARCH : CARRY_ON;
}
#endif
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(locked);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -2349,12 +2358,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
/* if it possible in indirect_to_direct conversion */
if (buffer_locked(tbS0)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(tbS0);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
print_cur_tb("fix_nodes");
reiserfs_panic(tb->tb_sb, "PAP-8305",
"there is pending do_balance");
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a14d6cd9eed..3a28e7751b3 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -251,7 +251,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
struct cpu_key key;
struct buffer_head *bh;
struct item_head *ih, tmp_ih;
- int fs_gen;
b_blocknr_t blocknr;
char *p = NULL;
int chars;
@@ -265,7 +264,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
(loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
3);
- research:
result = search_for_position_by_key(inode->i_sb, &key, &path);
if (result != POSITION_FOUND) {
pathrelse(&path);
@@ -340,7 +338,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
}
// read file tail into part of page
offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
- fs_gen = get_generation(inode->i_sb);
copy_item_head(&tmp_ih, ih);
/* we only want to kmap if we are reading the tail into the page.
@@ -348,13 +345,9 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
** sure we need to. But, this means the item might move if
** kmap schedules
*/
- if (!p) {
+ if (!p)
p = (char *)kmap(bh_result->b_page);
- if (fs_changed(fs_gen, inode->i_sb)
- && item_moved(&tmp_ih, &path)) {
- goto research;
- }
- }
+
p += offset;
memset(p, 0, inode->i_sb->s_blocksize);
do {
@@ -489,10 +482,14 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
disappeared */
if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
int err;
- lock_kernel();
+
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_commit_for_inode(inode);
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
- unlock_kernel();
+
+ reiserfs_write_unlock(inode->i_sb);
+
if (err < 0)
ret = err;
}
@@ -601,6 +598,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
__le32 *item;
int done;
int fs_gen;
+ int lock_depth;
struct reiserfs_transaction_handle *th = NULL;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -616,12 +614,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
loff_t new_offset =
(((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
- /* bad.... */
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
version = get_inode_item_key_version(inode);
if (!file_capable(inode, block)) {
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return -EFBIG;
}
@@ -633,7 +630,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* find number of block-th logical block of the file */
ret = _get_block_create_0(inode, block, bh_result,
create | GET_BLOCK_READ_DIRECT);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return ret;
}
/*
@@ -751,7 +748,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (!dangle && th)
retval = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
@@ -935,7 +932,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (blocks_needed == 1) {
un = &unf_single;
} else {
- un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling.
+ un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
if (!un) {
un = &unf_single;
blocks_needed = 1;
@@ -997,10 +994,16 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (retval)
goto failure;
}
- /* inserting indirect pointers for a hole can take a
- ** long time. reschedule if needed
+ /*
+ * inserting indirect pointers for a hole can take a
+ * long time. reschedule if needed and also release the write
+ * lock for others.
*/
- cond_resched();
+ if (need_resched()) {
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ schedule();
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ }
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -1035,7 +1038,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
retval = err;
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
reiserfs_check_path(&path);
return retval;
}
@@ -2072,8 +2075,9 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
int error;
struct buffer_head *bh = NULL;
int err2;
+ int lock_depth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
@@ -2142,14 +2146,17 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return 0;
out:
if (page) {
unlock_page(page);
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return error;
}
@@ -2608,7 +2615,10 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
int ret;
int old_ref = 0;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
fix_tail_page_for_writing(page);
if (reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th;
@@ -2664,6 +2674,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
int update_sd = 0;
struct reiserfs_transaction_handle *th;
unsigned start;
+ int lock_depth = 0;
+ bool locked = false;
if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
pos ++;
@@ -2690,9 +2702,11 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
** to do the i_size updates here.
*/
pos += copied;
+
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
/* If the file has grown beyond the border where it
can have a tail, unmark it as needing a tail
packing */
@@ -2703,10 +2717,9 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
ret = journal_begin(&myth, inode->i_sb, 1);
- if (ret) {
- reiserfs_write_unlock(inode->i_sb);
+ if (ret)
goto journal_error;
- }
+
reiserfs_update_inode_transaction(inode);
inode->i_size = pos;
/*
@@ -2718,34 +2731,36 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
reiserfs_update_sd(&myth, inode);
update_sd = 1;
ret = journal_end(&myth, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto journal_error;
}
if (th) {
- reiserfs_write_lock(inode->i_sb);
+ if (!locked) {
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
+ }
if (!update_sd)
mark_inode_dirty(inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto out;
}
out:
+ if (locked)
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
unlock_page(page);
page_cache_release(page);
return ret == 0 ? copied : ret;
journal_error:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ locked = false;
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
reiserfs_update_sd(th, inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
}
-
goto out;
}
@@ -2758,7 +2773,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
if (reiserfs_transaction_running(inode->i_sb)) {
th = current->journal_info;
}
@@ -2770,7 +2788,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
*/
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- reiserfs_write_lock(inode->i_sb);
/* If the file has grown beyond the border where it
can have a tail, unmark it as needing a tail
packing */
@@ -2781,10 +2798,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
ret = journal_begin(&myth, inode->i_sb, 1);
- if (ret) {
- reiserfs_write_unlock(inode->i_sb);
+ if (ret)
goto journal_error;
- }
+
reiserfs_update_inode_transaction(inode);
inode->i_size = pos;
/*
@@ -2796,16 +2812,13 @@ int reiserfs_commit_write(struct file *f, struct page *page,
reiserfs_update_sd(&myth, inode);
update_sd = 1;
ret = journal_end(&myth, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto journal_error;
}
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
mark_inode_dirty(inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto out;
}
@@ -2815,11 +2828,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
journal_error:
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
reiserfs_update_sd(th, inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
}
return ret;
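
The lock_depth bookkeeping above follows one pattern: a function that may or may not already hold the write lock records the depth returned by reiserfs_write_lock_once() and hands it back to reiserfs_write_unlock_once(), so only the outermost caller really drops the lock. A hedged sketch under that assumption, with the tree operation stubbed out:

#include <linux/fs.h>
#include <linux/reiserfs_fs.h>

/* Stub standing in for any operation that needs the write lock held. */
static int do_something_with_tree(struct inode *inode)
{
	return 0;
}

static int reiserfs_do_locked(struct inode *inode)
{
	int lock_depth;
	int ret;

	lock_depth = reiserfs_write_lock_once(inode->i_sb);
	ret = do_something_with_tree(inode);
	reiserfs_write_unlock_once(inode->i_sb, lock_depth);

	return ret;
}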
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 0ccc3fdda7b..ace77451ceb 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -13,44 +13,52 @@
#include <linux/compat.h>
/*
-** reiserfs_ioctl - handler for ioctl for inode
-** supported commands:
-** 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
-** and prevent packing file (argument arg has to be non-zero)
-** 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
-** 3) That's all for a while ...
-*/
-int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ * reiserfs_ioctl - handler for ioctl for inode
+ * supported commands:
+ * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
+ * and prevent packing file (argument arg has to be non-zero)
+ * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
+ * 3) That's all for a while ...
+ */
+long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_path.dentry->d_inode;
unsigned int flags;
int err = 0;
+ reiserfs_write_lock(inode->i_sb);
+
switch (cmd) {
case REISERFS_IOC_UNPACK:
if (S_ISREG(inode->i_mode)) {
if (arg)
- return reiserfs_unpack(inode, filp);
- else
- return 0;
+ err = reiserfs_unpack(inode, filp);
} else
- return -ENOTTY;
- /* following two cases are taken from fs/ext2/ioctl.c by Remy
- Card (card@masi.ibp.fr) */
+ err = -ENOTTY;
+ break;
+ /*
+ * following two cases are taken from fs/ext2/ioctl.c by Remy
+ * Card (card@masi.ibp.fr)
+ */
case REISERFS_IOC_GETFLAGS:
- if (!reiserfs_attrs(inode->i_sb))
- return -ENOTTY;
+ if (!reiserfs_attrs(inode->i_sb)) {
+ err = -ENOTTY;
+ break;
+ }
flags = REISERFS_I(inode)->i_attrs;
i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
- return put_user(flags, (int __user *)arg);
+ err = put_user(flags, (int __user *)arg);
+ break;
case REISERFS_IOC_SETFLAGS:{
- if (!reiserfs_attrs(inode->i_sb))
- return -ENOTTY;
+ if (!reiserfs_attrs(inode->i_sb)) {
+ err = -ENOTTY;
+ break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
- return err;
+ break;
if (!is_owner_or_cap(inode)) {
err = -EPERM;
@@ -90,16 +98,18 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
mark_inode_dirty(inode);
setflags_out:
mnt_drop_write(filp->f_path.mnt);
- return err;
+ break;
}
case REISERFS_IOC_GETVERSION:
- return put_user(inode->i_generation, (int __user *)arg);
+ err = put_user(inode->i_generation, (int __user *)arg);
+ break;
case REISERFS_IOC_SETVERSION:
if (!is_owner_or_cap(inode))
- return -EPERM;
+ err = -EPERM;
+ break;
err = mnt_want_write(filp->f_path.mnt);
if (err)
- return err;
+ break;
if (get_user(inode->i_generation, (int __user *)arg)) {
err = -EFAULT;
goto setversion_out;
@@ -108,19 +118,20 @@ setflags_out:
mark_inode_dirty(inode);
setversion_out:
mnt_drop_write(filp->f_path.mnt);
- return err;
+ break;
default:
- return -ENOTTY;
+ err = -ENOTTY;
}
+
+ reiserfs_write_unlock(inode->i_sb);
+
+ return err;
}
#ifdef CONFIG_COMPAT
long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- int ret;
-
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
case REISERFS_IOC32_UNPACK:
@@ -141,10 +152,8 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
default:
return -ENOIOCTLCMD;
}
- lock_kernel();
- ret = reiserfs_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
- unlock_kernel();
- return ret;
+
+ return reiserfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
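
The conversion above is the usual .ioctl to .unlocked_ioctl move: the handler receives only the file, derives the inode itself, takes the subsystem's own lock instead of the BKL, and funnels every case to a single exit point. A minimal sketch with hypothetical names (myfs_*, MYFS_IOC_GETVERSION); only the file_operations ->unlocked_ioctl signature and put_user() are taken as given:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#define MYFS_IOC_GETVERSION	_IOR('f', 3, int)	/* hypothetical command */

/* Stubs standing in for the subsystem's own lock. */
static void myfs_lock(struct super_block *sb) { }
static void myfs_unlock(struct super_block *sb) { }

static long myfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	long err = 0;

	myfs_lock(inode->i_sb);

	switch (cmd) {
	case MYFS_IOC_GETVERSION:
		err = put_user(inode->i_generation, (int __user *)arg);
		break;
	default:
		err = -ENOTTY;
	}

	myfs_unlock(inode->i_sb);
	return err;
}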
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 90622200b39..2f8a7e7b8da 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -429,21 +429,6 @@ static void clear_prepared_bits(struct buffer_head *bh)
clear_buffer_journal_restore_dirty(bh);
}
-/* utility function to force a BUG if it is called without the big
-** kernel lock held. caller is the string printed just before calling BUG()
-*/
-void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
-{
-#ifdef CONFIG_SMP
- if (current->lock_depth < 0) {
- reiserfs_panic(sb, "journal-1", "%s called without kernel "
- "lock held", caller);
- }
-#else
- ;
-#endif
-}
-
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
super_block
@@ -556,7 +541,8 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
static inline void lock_journal(struct super_block *sb)
{
PROC_INFO_INC(sb, journal.lock_journal);
- mutex_lock(&SB_JOURNAL(sb)->j_mutex);
+
+ reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}
/* unlock the current transaction */
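
reiserfs_mutex_lock_safe() is introduced elsewhere in this series; from its use here it is presumably a small wrapper that drops the superblock write lock before blocking on the mutex, so a mutex holder that itself needs the write lock cannot deadlock against us, and then retakes the write lock. A sketch under that assumption:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/reiserfs_fs.h>

/* Assumed shape of the helper: never sleep on a mutex while holding
 * the per-superblock write lock. */
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
					    struct super_block *s)
{
	reiserfs_write_unlock(s);
	mutex_lock(m);
	reiserfs_write_lock(s);
}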
@@ -708,7 +694,9 @@ static void check_barrier_completion(struct super_block *s,
disable_barrier(s);
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
}
}
@@ -996,8 +984,13 @@ static int reiserfs_async_progress_wait(struct super_block *s)
{
DEFINE_WAIT(wait);
struct reiserfs_journal *j = SB_JOURNAL(s);
- if (atomic_read(&j->j_async_throttle))
+
+ if (atomic_read(&j->j_async_throttle)) {
+ reiserfs_write_unlock(s);
congestion_wait(BLK_RW_ASYNC, HZ / 10);
+ reiserfs_write_lock(s);
+ }
+
return 0;
}
@@ -1043,7 +1036,8 @@ static int flush_commit_list(struct super_block *s,
}
/* make sure nobody is trying to flush this one at the same time */
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
+
if (!journal_list_still_alive(s, trans_id)) {
mutex_unlock(&jl->j_commit_mutex);
goto put_jl;
@@ -1061,12 +1055,17 @@ static int flush_commit_list(struct super_block *s,
if (!list_empty(&jl->j_bh_list)) {
int ret;
- unlock_kernel();
+
+ /*
+ * We might sleep in numerous places inside
+ * write_ordered_buffers. Relax the write lock.
+ */
+ reiserfs_write_unlock(s);
ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_bh_list);
if (ret < 0 && retval == 0)
retval = ret;
- lock_kernel();
+ reiserfs_write_lock(s);
}
BUG_ON(!list_empty(&jl->j_bh_list));
/*
@@ -1085,8 +1084,11 @@ static int flush_commit_list(struct super_block *s,
SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
if (tbh) {
- if (buffer_dirty(tbh))
- ll_rw_block(WRITE, 1, &tbh) ;
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
+ ll_rw_block(WRITE, 1, &tbh);
+ reiserfs_write_lock(s);
+ }
put_bh(tbh) ;
}
}
@@ -1114,12 +1116,19 @@ static int flush_commit_list(struct super_block *s,
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
+
+ reiserfs_write_unlock(s);
wait_on_buffer(tbh);
+ reiserfs_write_lock(s);
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
- if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
+ /* redundant, sync_dirty_buffer() checks */
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
sync_dirty_buffer(tbh);
+ reiserfs_write_lock(s);
+ }
if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(s, "journal-601",
@@ -1143,10 +1152,15 @@ static int flush_commit_list(struct super_block *s,
if (buffer_dirty(jl->j_commit_bh))
BUG();
mark_buffer_dirty(jl->j_commit_bh) ;
+ reiserfs_write_unlock(s);
sync_dirty_buffer(jl->j_commit_bh) ;
+ reiserfs_write_lock(s);
}
- } else
+ } else {
+ reiserfs_write_unlock(s);
wait_on_buffer(jl->j_commit_bh);
+ reiserfs_write_lock(s);
+ }
check_barrier_completion(s, jl->j_commit_bh);
@@ -1286,7 +1300,9 @@ static int _update_journal_header_block(struct super_block *sb,
if (trans_id >= journal->j_last_flush_trans_id) {
if (buffer_locked((journal->j_header_bh))) {
+ reiserfs_write_unlock(sb);
wait_on_buffer((journal->j_header_bh));
+ reiserfs_write_lock(sb);
if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(sb, "journal-699",
@@ -1312,12 +1328,16 @@ static int _update_journal_header_block(struct super_block *sb,
disable_barrier(sb);
goto sync;
}
+ reiserfs_write_unlock(sb);
wait_on_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
check_barrier_completion(sb, journal->j_header_bh);
} else {
sync:
set_buffer_dirty(journal->j_header_bh);
+ reiserfs_write_unlock(sb);
sync_dirty_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
}
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
@@ -1409,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
/* if flushall == 0, the lock is already held */
if (flushall) {
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
} else if (mutex_trylock(&journal->j_flush_mutex)) {
BUG();
}
@@ -1553,7 +1573,11 @@ static int flush_journal_list(struct super_block *s,
reiserfs_panic(s, "journal-1011",
"cn->bh is NULL");
}
+
+ reiserfs_write_unlock(s);
wait_on_buffer(cn->bh);
+ reiserfs_write_lock(s);
+
if (!cn->bh) {
reiserfs_panic(s, "journal-1012",
"cn->bh is NULL");
@@ -1769,7 +1793,7 @@ static int kupdate_transactions(struct super_block *s,
struct reiserfs_journal *journal = SB_JOURNAL(s);
chunk.nr = 0;
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
if (!journal_list_still_alive(s, orig_trans_id)) {
goto done;
}
@@ -1973,11 +1997,19 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
reiserfs_mounted_fs_count--;
/* wait for all commits to finish */
cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+
+ /*
+ * We must release the write lock here because
+ * the workqueue job (flush_async_commit) needs this lock
+ */
+ reiserfs_write_unlock(sb);
flush_workqueue(commit_wq);
+
if (!reiserfs_mounted_fs_count) {
destroy_workqueue(commit_wq);
commit_wq = NULL;
}
+ reiserfs_write_lock(sb);
free_journal_ram(sb);
@@ -2243,7 +2275,11 @@ static int journal_read_transaction(struct super_block *sb,
/* read in the log blocks, memcpy to the corresponding real block */
ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
+
+ reiserfs_write_unlock(sb);
wait_on_buffer(log_blocks[i]);
+ reiserfs_write_lock(sb);
+
if (!buffer_uptodate(log_blocks[i])) {
reiserfs_warning(sb, "journal-1212",
"REPLAY FAILURE fsck required! "
@@ -2765,11 +2801,27 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
+ /*
+ * We need to unlock here to avoid creating the following
+ * dependency:
+ * reiserfs_lock -> sysfs_mutex
+ * The reiserfs mmap path creates the dependency
+ * mm->mmap -> reiserfs_lock, so we would end up with
+ * mm->mmap -> reiserfs_lock -> sysfs_mutex,
+ * which forms a circular dependency with the sysfs readdir path,
+ * which takes sysfs_mutex -> mm->mmap_sem.
+ * Dropping the lock here is fine because it is useless in the mount path,
+ * at least until we call journal_begin. We keep it only out of paranoia.
+ */
+ reiserfs_write_unlock(sb);
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
+ reiserfs_write_lock(sb);
reiserfs_warning(sb, "sh-462",
"unable to initialize jornal device");
goto free_and_return;
}
+ reiserfs_write_lock(sb);
rs = SB_DISK_SUPER_BLOCK(sb);
@@ -2881,8 +2933,11 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
}
reiserfs_mounted_fs_count++;
- if (reiserfs_mounted_fs_count <= 1)
+ if (reiserfs_mounted_fs_count <= 1) {
+ reiserfs_write_unlock(sb);
commit_wq = create_workqueue("reiserfs");
+ reiserfs_write_lock(sb);
+ }
INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
journal->j_work_sb = sb;
@@ -2964,8 +3019,11 @@ static void queue_log_writer(struct super_block *s)
init_waitqueue_entry(&wait, current);
add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
+ if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
+ reiserfs_write_unlock(s);
schedule();
+ reiserfs_write_lock(s);
+ }
__set_current_state(TASK_RUNNING);
remove_wait_queue(&journal->j_join_wait, &wait);
}
@@ -2982,7 +3040,9 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned long bcount = journal->j_bcount;
while (1) {
+ reiserfs_write_unlock(sb);
schedule_timeout_uninterruptible(1);
+ reiserfs_write_lock(sb);
journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read(&journal->j_jlock)) &&
@@ -3033,7 +3093,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(sb);
+ reiserfs_write_unlock(sb);
reiserfs_wait_on_write_block(sb);
+ reiserfs_write_lock(sb);
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
@@ -3506,14 +3568,14 @@ static void flush_async_commits(struct work_struct *work)
struct reiserfs_journal_list *jl;
struct list_head *entry;
- lock_kernel();
+ reiserfs_write_lock(sb);
if (!list_empty(&journal->j_journal_list)) {
/* last entry is the youngest, commit it and you get everything */
entry = journal->j_journal_list.prev;
jl = JOURNAL_LIST_ENTRY(entry);
flush_commit_list(sb, jl, 1);
}
- unlock_kernel();
+ reiserfs_write_unlock(sb);
}
/*
@@ -4041,7 +4103,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* the new transaction is fully setup, and we've already flushed the
* ordered bh list
*/
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
/* save the transaction id in case we need to commit it later */
commit_trans_id = jl->j_trans_id;
@@ -4156,7 +4218,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
next = cn->next;
free_cnode(sb, cn);
cn = next;
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
}
/* we are done with both the c_bh and d_bh, but
@@ -4203,10 +4267,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
- unlock_kernel();
+ reiserfs_write_unlock(sb);
write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_tail_bh_list);
- lock_kernel();
+ reiserfs_write_lock(sb);
}
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
new file mode 100644
index 00000000000..ee2cfc0fd8a
--- /dev/null
+++ b/fs/reiserfs/lock.c
@@ -0,0 +1,88 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/mutex.h>
+
+/*
+ * The previous reiserfs locking scheme was heavily based on
+ * the tricky properties of the Bkl:
+ *
+ * - it was acquired recursively by the same task
+ * - performance relied on the release-while-schedule() property
+ *
+ * Now that we replace it with a mutex, we still want to keep the same
+ * recursive property to avoid big changes in the code structure.
+ * We use our own lock_owner here because the owner field on a mutex
+ * is only available with SMP or mutex debugging, and we only need that
+ * field for this one mutex; there is no need for a system-wide facility.
+ *
+ * Also, this lock is often released before a call that could block, because
+ * reiserfs performance was partially based on the release-while-schedule()
+ * property of the BKL.
+ */
+void reiserfs_write_lock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ }
+
+ /* No need to protect it, only the current task touches it */
+ sb_i->lock_depth++;
+}
+
+void reiserfs_write_unlock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ /*
+ * Are we unlocking without even holding the lock?
+ * Such a situation must raise a BUG() if we don't want
+ * to corrupt the data.
+ */
+ BUG_ON(sb_i->lock_owner != current);
+
+ if (--sb_i->lock_depth == -1) {
+ sb_i->lock_owner = NULL;
+ mutex_unlock(&sb_i->lock);
+ }
+}
+
+/*
+ * If we already own the lock, just exit and don't increase the depth.
+ * Useful when we don't want to lock more than once.
+ *
+ * We always return the lock_depth we had before calling
+ * this function.
+ */
+int reiserfs_write_lock_once(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ return sb_i->lock_depth++;
+ }
+
+ return sb_i->lock_depth;
+}
+
+void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+{
+ if (lock_depth == -1)
+ reiserfs_write_unlock(s);
+}
+
+/*
+ * Utility function to force a BUG if it is called without the superblock
+ * write lock held. @caller is the string printed just before the BUG().
+ */
+void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
+
+ if (sb_i->lock_depth < 0)
+ reiserfs_panic(sb, "%s called without write lock held",
+ caller);
+}
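
The depth-counted behaviour of the new write lock can be modelled outside the kernel. Below is a minimal user-space sketch, assuming pthreads; rec_lock and its helpers are illustrative names, not kernel API:

#include <assert.h>
#include <pthread.h>

struct rec_lock {
	pthread_mutex_t mutex;
	pthread_t owner;
	int owner_valid;
	int depth;			/* starts at -1, like lock_depth */
};

static struct rec_lock wl = {
	.mutex = PTHREAD_MUTEX_INITIALIZER,
	.depth = -1,
};

static void rec_lock_acquire(struct rec_lock *l)
{
	if (!l->owner_valid || !pthread_equal(l->owner, pthread_self())) {
		pthread_mutex_lock(&l->mutex);
		l->owner = pthread_self();
		l->owner_valid = 1;
	}
	/* only the owner touches the depth, no extra protection needed */
	l->depth++;
}

static void rec_lock_release(struct rec_lock *l)
{
	assert(l->owner_valid && pthread_equal(l->owner, pthread_self()));
	if (--l->depth == -1) {
		l->owner_valid = 0;
		pthread_mutex_unlock(&l->mutex);
	}
}

int main(void)
{
	rec_lock_acquire(&wl);
	rec_lock_acquire(&wl);		/* recursive: depth goes 0 -> 1 */
	rec_lock_release(&wl);		/* still held by this thread */
	rec_lock_release(&wl);		/* depth back to -1, mutex dropped */
	return 0;
}

Nested acquire/release pairs by the owner only move the depth counter; the underlying mutex is dropped when the depth falls back to -1, which mirrors the lock_depth logic above.
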
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 27157912863..e296ff72a6c 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -324,6 +324,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
int retval;
+ int lock_depth;
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
@@ -331,7 +332,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
return ERR_PTR(-ENAMETOOLONG);
- reiserfs_write_lock(dir->i_sb);
+ /*
+ * Might be called with or without the write lock held. Be careful not
+ * to take it recursively, in case we want to release the lock before
+ * rescheduling.
+ */
+ lock_depth = reiserfs_write_lock_once(dir->i_sb);
+
de.de_gen_number_bit_string = NULL;
retval =
reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len,
@@ -341,7 +348,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
inode = reiserfs_iget(dir->i_sb,
(struct cpu_key *)&(de.de_dir_id));
if (!inode || IS_ERR(inode)) {
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
return ERR_PTR(-EACCES);
}
@@ -350,7 +357,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
if (retval == IO_ERROR) {
return ERR_PTR(-EIO);
}
@@ -725,6 +732,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
+ int lock_depth;
/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
@@ -748,7 +756,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return retval;
}
jbegin_count += retval;
- reiserfs_write_lock(dir->i_sb);
+ lock_depth = reiserfs_write_lock_once(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval) {
@@ -798,8 +806,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
d_instantiate(dentry, inode);
unlock_new_inode(inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
- out_failed:
- reiserfs_write_unlock(dir->i_sb);
+out_failed:
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
return retval;
}
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 536eacaeb71..adbc6f53851 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -349,10 +349,6 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
. */
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
-
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 18b315d3d10..b3a94d20f0f 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -141,7 +141,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
// update bitmap_info stuff
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index d036ee5b1c8..5fa7118f04e 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -222,9 +222,6 @@ static inline int bin_search(const void *key, /* Key to search for. */
return ITEM_NOT_FOUND;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };
@@ -519,25 +516,48 @@ static int is_tree_node(struct buffer_head *bh, int level)
#define SEARCH_BY_KEY_READA 16
-/* The function is NOT SCHEDULE-SAFE! */
-static void search_by_key_reada(struct super_block *s,
+/*
+ * The function is NOT SCHEDULE-SAFE!
+ * It may release the write lock if it needs to wait for a block
+ * to be read. Note that in this case it won't reacquire the lock, to avoid
+ * high contention resulting from too many lock requests, especially
+ * since the caller (search_by_key) will perform other schedule-unsafe
+ * operations just after calling this function.
+ *
+ * @return true if the write lock has been released
+ */
+static bool search_by_key_reada(struct super_block *s,
struct buffer_head **bh,
b_blocknr_t *b, int num)
{
int i, j;
+ bool unlocked = false;
for (i = 0; i < num; i++) {
bh[i] = sb_getblk(s, b[i]);
}
+ /*
+ * We are going to read some blocks on which we
+ * hold a reference. That is safe, though we might be
+ * reading blocks that change concurrently once we release
+ * the lock. It is still fine because we check later
+ * whether the tree has changed.
+ */
for (j = 0; j < i; j++) {
/*
* note, this needs attention if we are getting rid of the BKL
* you have to make sure the prepared bit isn't set on this buffer
*/
- if (!buffer_uptodate(bh[j]))
+ if (!buffer_uptodate(bh[j])) {
+ if (!unlocked) {
+ reiserfs_write_unlock(s);
+ unlocked = true;
+ }
ll_rw_block(READA, 1, bh + j);
+ }
brelse(bh[j]);
}
+ return unlocked;
}
/**************************************************************************
@@ -625,11 +645,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
have a pointer to it. */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
+ bool unlocked = false;
+
if (!buffer_uptodate(bh) && reada_count > 1)
- search_by_key_reada(sb, reada_bh,
+ /* may unlock the write lock */
+ unlocked = search_by_key_reada(sb, reada_bh,
reada_blocks, reada_count);
+ /*
+ * If we haven't already released the write lock,
+ * we need to do that here before reading
+ * the current block.
+ */
+ if (!buffer_uptodate(bh) && !unlocked) {
+ reiserfs_write_unlock(sb);
+ unlocked = true;
+ }
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
+
+ if (unlocked)
+ reiserfs_write_lock(sb);
if (!buffer_uptodate(bh))
goto io_error;
} else {
@@ -673,7 +708,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
!key_in_buffer(search_path, key, sb),
"PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(sb)->cur_tb) {
print_cur_tb("5140");
reiserfs_panic(sb, "PAP-5140",
"schedule occurred in do_balance!");
@@ -1024,7 +1059,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
reiserfs_free_block(th, inode, block, 1);
}
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
if (item_moved (&s_ih, path)) {
need_re_search = 1;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f0ad05f3802..339b0baf2af 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -465,7 +465,7 @@ static void reiserfs_put_super(struct super_block *s)
struct reiserfs_transaction_handle th;
th.t_trans_id = 0;
- lock_kernel();
+ reiserfs_write_lock(s);
if (s->s_dirt)
reiserfs_write_super(s);
@@ -499,10 +499,10 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_proc_info_done(s);
+ reiserfs_write_unlock(s);
+ mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
-
- unlock_kernel();
}
static struct kmem_cache *reiserfs_inode_cachep;
@@ -554,25 +554,28 @@ static void reiserfs_dirty_inode(struct inode *inode)
struct reiserfs_transaction_handle th;
int err = 0;
+ int lock_depth;
+
if (inode->i_sb->s_flags & MS_RDONLY) {
reiserfs_warning(inode->i_sb, "clm-6006",
"writing inode %lu on readonly FS",
inode->i_ino);
return;
}
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
/* this is really only used for atime updates, so they don't have
** to be included in O_SYNC or fsync
*/
err = journal_begin(&th, inode->i_sb, 1);
- if (err) {
- reiserfs_write_unlock(inode->i_sb);
- return;
- }
+ if (err)
+ goto out;
+
reiserfs_update_sd(&th, inode);
journal_end(&th, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
+
+out:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
#ifdef CONFIG_QUOTA
@@ -1168,11 +1171,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
unsigned int qfmt = 0;
#ifdef CONFIG_QUOTA
int i;
+#endif
+
+ reiserfs_write_lock(s);
+#ifdef CONFIG_QUOTA
memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
#endif
- lock_kernel();
rs = SB_DISK_SUPER_BLOCK(s);
if (!reiserfs_parse_options
@@ -1295,12 +1301,12 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
out_ok:
replace_mount_options(s, new_opts);
- unlock_kernel();
+ reiserfs_write_unlock(s);
return 0;
out_err:
kfree(new_opts);
- unlock_kernel();
+ reiserfs_write_unlock(s);
return err;
}
@@ -1404,7 +1410,9 @@ static int read_super_block(struct super_block *s, int offset)
static int reread_meta_blocks(struct super_block *s)
{
ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+ reiserfs_write_unlock(s);
wait_on_buffer(SB_BUFFER_WITH_SB(s));
+ reiserfs_write_lock(s);
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
@@ -1613,7 +1621,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
if (!sbi) {
errval = -ENOMEM;
- goto error;
+ goto error_alloc;
}
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
@@ -1627,6 +1635,20 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
/* setup default block allocator options */
reiserfs_init_alloc_options(s);
+ mutex_init(&REISERFS_SB(s)->lock);
+ REISERFS_SB(s)->lock_depth = -1;
+
+ /*
+ * This function is called with the BKL held, which was also the old
+ * locking scheme used here.
+ * do_journal_begin() will soon check that we hold the lock (i.e. what
+ * used to be the BKL). That check likely exists only because
+ * do_journal_begin() has several other callers; at this point in the
+ * mount there doesn't seem to be anything to protect against.
+ * Anyway, let's be conservative and lock for now.
+ */
+ reiserfs_write_lock(s);
+
jdev_name = NULL;
if (reiserfs_parse_options
(s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
@@ -1852,9 +1874,13 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
init_waitqueue_head(&(sbi->s_wait));
spin_lock_init(&sbi->bitmap_lock);
+ reiserfs_write_unlock(s);
+
return (0);
error:
+ reiserfs_write_unlock(s);
+error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6925b835a43..58aa8e75f7f 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -975,7 +975,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
@@ -1004,14 +1004,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
goto error;
if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
err = create_privroot(REISERFS_SB(s)->priv_root);
mutex_unlock(&s->s_root->d_inode->i_mutex);
}
if (privroot->d_inode) {
s->s_xattr = reiserfs_xattr_handlers;
- mutex_lock(&privroot->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
dentry = lookup_one_len(XAROOT_NAME, privroot,
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 1ffb53f74d3..fc0d575c71e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -106,7 +106,6 @@ struct blkcipher_walk {
extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;
-extern const struct crypto_type crypto_hash_type;
void crypto_mod_put(struct crypto_alg *alg);
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 2f65a6e8ea4..1c96b255017 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -39,6 +39,7 @@ static inline struct cryptd_ahash *__cryptd_ahash_cast(
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
void cryptd_free_ahash(struct cryptd_ahash *tfm);
#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 3a1dbba4d3a..0cf725bdd2a 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -77,10 +77,10 @@ enum clock_event_nofitiers {
struct clock_event_device {
const char *name;
unsigned int features;
- unsigned long max_delta_ns;
- unsigned long min_delta_ns;
- unsigned long mult;
- int shift;
+ u64 max_delta_ns;
+ u64 min_delta_ns;
+ u32 mult;
+ u32 shift;
int rating;
int irq;
const struct cpumask *cpumask;
@@ -116,8 +116,8 @@ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
}
/* Clock event layer functions */
-extern unsigned long clockevent_delta2ns(unsigned long latch,
- struct clock_event_device *evt);
+extern u64 clockevent_delta2ns(unsigned long latch,
+ struct clock_event_device *evt);
extern void clockevents_register_device(struct clock_event_device *dev);
extern void clockevents_exchange_device(struct clock_event_device *old,
@@ -130,6 +130,13 @@ extern int clockevents_program_event(struct clock_event_device *dev,
extern void clockevents_handle_noop(struct clock_event_device *dev);
+static inline void
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+{
+ return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC,
+ freq, minsec);
+}
+
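
As a rough usage sketch (not part of this patch set), a clock event driver could combine the new helper with clockevent_delta2ns() when registering its device. The device name, rating and 32-bit counter width below are assumptions, and the programming callbacks are omitted:

static struct clock_event_device example_ce = {
	.name		= "example",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	/* .set_next_event / .set_mode callbacks omitted for brevity */
};

static void __init example_ce_init(u32 timer_hz)
{
	/* derive mult/shift valid for conversions of at least 4 seconds */
	clockevents_calc_mult_shift(&example_ce, timer_hz, 4);
	example_ce.max_delta_ns = clockevent_delta2ns(0xffffffff, &example_ce);
	example_ce.min_delta_ns = clockevent_delta2ns(0xf, &example_ce);
	example_ce.cpumask = cpumask_of(0);
	clockevents_register_device(&example_ce);
}
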
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg);
#else
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 83d2fbd81b9..8a4a130cc19 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -151,6 +151,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
* @flags: flags describing special properties
* @vread: vsyscall based read
* @resume: resume function for the clocksource, if necessary
@@ -168,6 +169,7 @@ struct clocksource {
cycle_t mask;
u32 mult;
u32 shift;
+ u64 max_idle_ns;
unsigned long flags;
cycle_t (*vread)(void);
void (*resume)(void);
@@ -279,11 +281,23 @@ extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
+
+static inline void
+clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
+{
+ return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+ NSEC_PER_SEC, minsec);
+}
+
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
-extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
+extern void
+update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
-static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
+static inline void
+update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
{
}
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fd929889e8d..24d2e30f1b4 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -250,29 +250,6 @@ struct cipher_alg {
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
-struct digest_alg {
- unsigned int dia_digestsize;
- void (*dia_init)(struct crypto_tfm *tfm);
- void (*dia_update)(struct crypto_tfm *tfm, const u8 *data,
- unsigned int len);
- void (*dia_final)(struct crypto_tfm *tfm, u8 *out);
- int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-};
-
-struct hash_alg {
- int (*init)(struct hash_desc *desc);
- int (*update)(struct hash_desc *desc, struct scatterlist *sg,
- unsigned int nbytes);
- int (*final)(struct hash_desc *desc, u8 *out);
- int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
- unsigned int nbytes, u8 *out);
- int (*setkey)(struct crypto_hash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int digestsize;
-};
-
struct compress_alg {
int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen);
@@ -293,8 +270,6 @@ struct rng_alg {
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
-#define cra_digest cra_u.digest
-#define cra_hash cra_u.hash
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
@@ -320,8 +295,6 @@ struct crypto_alg {
struct aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
- struct digest_alg digest;
- struct hash_alg hash;
struct compress_alg compress;
struct rng_alg rng;
} cra_u;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 5de4c9e5856..d7cecc90ed3 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -126,7 +126,9 @@ extern int free_irte(int irq);
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
+extern struct intel_iommu *map_hpet_to_ir(u8 id);
extern int set_ioapic_sid(struct irte *irte, int apic);
+extern int set_hpet_sid(struct irte *irte, u8 id);
extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
@@ -158,10 +160,18 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{
return NULL;
}
+static inline struct intel_iommu *map_hpet_to_ir(unsigned int hpet_id)
+{
+ return NULL;
+}
static inline int set_ioapic_sid(struct irte *irte, int apic)
{
return 0;
}
+static inline int set_hpet_sid(struct irte *irte, u8 id)
+{
+ return -1;
+}
static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
return 0;
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 79f63a27bce..219ca4f6bea 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -126,4 +126,6 @@ struct hpet_info {
#define HPET_DPI _IO('h', 0x05) /* disable periodic */
#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
+#define MAX_HPET_TBS 8 /* maximum hpet timer blocks */
+
#endif /* !__HPET__ */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index ff037f0b1b4..9bace4b9f4f 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -446,7 +446,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
- if (likely(!timer->start_site))
+ if (likely(!timer_stats_active))
return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);
@@ -457,8 +457,6 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
- if (likely(!timer_stats_active))
- return;
__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 7244456e7e6..9bede7633f7 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -22,10 +22,12 @@
* @cleanup: Cleanup callback (can be NULL).
* @data_present: Callback to determine if data is available
* on the RNG. If NULL, it is assumed that
- * there is always data available.
+ * there is always data available. *OBSOLETE*
* @data_read: Read data from the RNG device.
* Returns the number of lower random bytes in "data".
- * Must not be NULL.
+ * Must not be NULL. *OBSOLETE*
+ * @read: New API. Drivers can fill up to max bytes of data
+ * into the buffer. The buffer is aligned for any type.
* @priv: Private data, for use by the RNG driver.
*/
struct hwrng {
@@ -34,6 +36,7 @@ struct hwrng {
void (*cleanup)(struct hwrng *rng);
int (*data_present)(struct hwrng *rng, int wait);
int (*data_read)(struct hwrng *rng, u32 *data);
+ int (*read)(struct hwrng *rng, void *data, size_t max, bool wait);
unsigned long priv;
/* internal. */
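
A hedged sketch of a driver wired to the new ->read callback; example_rng, example_rng_read and my_hw_read32() are made-up names, and error handling is omitted:

static u32 my_hw_read32(void);		/* hypothetical device register read */

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 *buf = data;
	size_t i, words = max / sizeof(u32);

	for (i = 0; i < words; i++)
		buf[i] = my_hw_read32();

	return words * sizeof(u32);	/* bytes actually placed in data */
}

static struct hwrng example_rng = {
	.name = "example",
	.read = example_rng_read,
};

Such a driver is registered as before; the older data_present/data_read pair remains only for drivers that have not been converted.
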
diff --git a/arch/sh/include/asm/sh_keysc.h b/include/linux/input/sh_keysc.h
index 4a65b1e40ea..c211b5cf08e 100644
--- a/arch/sh/include/asm/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_KEYSC_H__
-#define __ASM_KEYSC_H__
+#ifndef __SH_KEYSC_H__
+#define __SH_KEYSC_H__
#define SH_KEYSC_MAXKEYS 30
@@ -11,4 +11,4 @@ struct sh_keysc_info {
int keycodes[SH_KEYSC_MAXKEYS];
};
-#endif /* __ASM_KEYSC_H__ */
+#endif /* __SH_KEYSC_H__ */
diff --git a/include/linux/mfd/sh_mobile_sdhi.h b/include/linux/mfd/sh_mobile_sdhi.h
new file mode 100644
index 00000000000..3bcd7163485
--- /dev/null
+++ b/include/linux/mfd/sh_mobile_sdhi.h
@@ -0,0 +1,8 @@
+#ifndef __SH_MOBILE_SDHI_H__
+#define __SH_MOBILE_SDHI_H__
+
+struct sh_mobile_sdhi_info {
+ void (*set_pwr)(struct platform_device *pdev, int state);
+};
+
+#endif /* __SH_MOBILE_SDHI_H__ */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index dd31e7bae35..a05b4a20768 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -52,11 +52,63 @@
#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
-/* Locking primitives */
-/* Right now we are still falling back to (un)lock_kernel, but eventually that
- would evolve into real per-fs locks */
-#define reiserfs_write_lock( sb ) lock_kernel()
-#define reiserfs_write_unlock( sb ) unlock_kernel()
+/*
+ * Locking primitives. The write lock is a per-superblock
+ * special mutex with properties close to those of the Big Kernel Lock,
+ * which was used in the previous locking scheme.
+ */
+void reiserfs_write_lock(struct super_block *s);
+void reiserfs_write_unlock(struct super_block *s);
+int reiserfs_write_lock_once(struct super_block *s);
+void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+
+/*
+ * Several mutexes depend on the write lock.
+ * However, sometimes we want to relax the write lock while we hold
+ * these mutexes, following the release/reacquire-on-schedule()
+ * behaviour of the BKL that was used before.
+ * Reiserfs performance and locking were based on this scheme.
+ * Now that the write lock is a mutex and not the bkl anymore, doing so
+ * may result in a deadlock:
+ *
+ * A acquire write_lock
+ * A acquire j_commit_mutex
+ * A release write_lock and wait for something
+ * B acquire write_lock
+ * B can't acquire j_commit_mutex and sleep
+ * A can't acquire write lock anymore
+ * deadlock
+ *
+ * What we do here is avoid such a deadlock by playing the same game
+ * as the BKL: before acquiring a mutex that depends on the write lock,
+ * we release the write lock, take the mutex, and then reacquire it.
+ *
+ * The mutexes concerned by this hack are:
+ * - The commit mutex of a journal list
+ * - The flush mutex
+ * - The journal lock
+ * - The inode mutex
+ */
+static inline void reiserfs_mutex_lock_safe(struct mutex *m,
+ struct super_block *s)
+{
+ reiserfs_write_unlock(s);
+ mutex_lock(m);
+ reiserfs_write_lock(s);
+}
+
+/*
+ * When we schedule, we usually also want to release the write lock,
+ * in keeping with the previous BKL-based locking scheme of reiserfs.
+ */
+static inline void reiserfs_cond_resched(struct super_block *s)
+{
+ if (need_resched()) {
+ reiserfs_write_unlock(s);
+ schedule();
+ reiserfs_write_lock(s);
+ }
+}
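
Since reiserfs_mutex_lock_safe() drops the write lock while it sleeps on the inner mutex, callers have to treat any filesystem state read beforehand as potentially stale. A minimal sketch of that caller pattern, using the generation check defined in the next hunk (example_commit_path and its body are illustrative only):

static void example_commit_path(struct super_block *s,
				struct reiserfs_journal_list *jl)
{
	int gen = get_generation(s);	/* snapshot before we may sleep */

	/* drops the write lock, takes j_commit_mutex, retakes the lock */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);

	if (__fs_changed(gen, s)) {
		/*
		 * The tree may have moved while the write lock was
		 * dropped, so any cached path or position must be
		 * re-validated here.
		 */
	}

	/* ... work protected by both the write lock and the mutex ... */

	mutex_unlock(&jl->j_commit_mutex);
}
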
struct fid;
@@ -1329,7 +1381,11 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
#define get_generation(s) atomic_read (&fs_generation(s))
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
#define __fs_changed(gen,s) (gen != get_generation (s))
-#define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
+#define fs_changed(gen,s) \
+({ \
+ reiserfs_cond_resched(s); \
+ __fs_changed(gen, s); \
+})
/***************************************************************************/
/* FIXATE NODES */
@@ -2258,8 +2314,7 @@ __u32 r5_hash(const signed char *msg, int len);
#define SPARE_SPACE 500
/* prototypes from ioctl.c */
-int reiserfs_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long reiserfs_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
int reiserfs_unpack(struct inode *inode, struct file *filp);
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index dab68bbed67..52c83b6a758 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -7,6 +7,8 @@
#ifdef __KERNEL__
#include <linux/workqueue.h>
#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
#endif
typedef enum {
@@ -355,6 +357,13 @@ struct reiserfs_sb_info {
struct reiserfs_journal *s_journal; /* pointer to journal information */
unsigned short s_mount_state; /* reiserfs state (valid, invalid) */
+ /* Serialize writers access, replace the old bkl */
+ struct mutex lock;
+ /* Owner of the lock (can be recursive) */
+ struct task_struct *lock_owner;
+ /* Depth of the lock, start from -1 like the bkl */
+ int lock_depth;
+
/* Comment? -Hans */
void (*end_io_handler) (struct buffer_head *, int);
hashf_t s_hash_function; /* pointer to function which is used
@@ -408,6 +417,17 @@ struct reiserfs_sb_info {
char *s_qf_names[MAXQUOTAS];
int s_jquota_fmt;
#endif
+#ifdef CONFIG_REISERFS_CHECK
+
+ struct tree_balance *cur_tb; /*
+ * Detects whether more than one
+ * copy of tb exists per superblock
+ * as a means of checking whether
+ * do_balance is executing concurrently
+ * with another tree reader/writer
+ * on the same mount point.
+ */
+#endif
};
/* Definitions of reiserfs on-disk properties: */
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 68e212ff9dd..4ef246f1465 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -57,10 +57,8 @@ struct intc_desc {
struct intc_sense_reg *sense_regs;
unsigned int nr_sense_regs;
char *name;
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
struct intc_mask_reg *ack_regs;
unsigned int nr_ack_regs;
-#endif
};
#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
@@ -73,7 +71,6 @@ struct intc_desc symbol __initdata = { \
chipname, \
}
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \
mask_regs, prio_regs, sense_regs, ack_regs) \
struct intc_desc symbol __initdata = { \
@@ -83,9 +80,11 @@ struct intc_desc symbol __initdata = { \
chipname, \
_INTC_ARRAY(ack_regs), \
}
-#endif
void __init register_intc_controller(struct intc_desc *desc);
int intc_set_priority(unsigned int irq, unsigned int prio);
+int reserve_irq_vector(unsigned int irq);
+void reserve_irq_legacy(void);
+
#endif /* __SH_INTC_H */
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h
new file mode 100644
index 00000000000..07c08af9f8f
--- /dev/null
+++ b/include/linux/sh_pfc.h
@@ -0,0 +1,96 @@
+/*
+ * SuperH Pin Function Controller Support
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __SH_PFC_H
+#define __SH_PFC_H
+
+#include <asm-generic/gpio.h>
+
+typedef unsigned short pinmux_enum_t;
+typedef unsigned short pinmux_flag_t;
+
+#define PINMUX_TYPE_NONE 0
+#define PINMUX_TYPE_FUNCTION 1
+#define PINMUX_TYPE_GPIO 2
+#define PINMUX_TYPE_OUTPUT 3
+#define PINMUX_TYPE_INPUT 4
+#define PINMUX_TYPE_INPUT_PULLUP 5
+#define PINMUX_TYPE_INPUT_PULLDOWN 6
+
+#define PINMUX_FLAG_TYPE (0x7)
+#define PINMUX_FLAG_WANT_PULLUP (1 << 3)
+#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4)
+
+#define PINMUX_FLAG_DBIT_SHIFT 5
+#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
+#define PINMUX_FLAG_DREG_SHIFT 10
+#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT)
+
+struct pinmux_gpio {
+ pinmux_enum_t enum_id;
+ pinmux_flag_t flags;
+};
+
+#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
+#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
+
+struct pinmux_cfg_reg {
+ unsigned long reg, reg_width, field_width;
+ unsigned long *cnt;
+ pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_CFG_REG(name, r, r_width, f_width) \
+ .reg = r, .reg_width = r_width, .field_width = f_width, \
+ .cnt = (unsigned long [r_width / f_width]) {}, \
+ .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
+
+struct pinmux_data_reg {
+ unsigned long reg, reg_width, reg_shadow;
+ pinmux_enum_t *enum_ids;
+};
+
+#define PINMUX_DATA_REG(name, r, r_width) \
+ .reg = r, .reg_width = r_width, \
+ .enum_ids = (pinmux_enum_t [r_width]) \
+
+struct pinmux_range {
+ pinmux_enum_t begin;
+ pinmux_enum_t end;
+ pinmux_enum_t force;
+};
+
+struct pinmux_info {
+ char *name;
+ pinmux_enum_t reserved_id;
+ struct pinmux_range data;
+ struct pinmux_range input;
+ struct pinmux_range input_pd;
+ struct pinmux_range input_pu;
+ struct pinmux_range output;
+ struct pinmux_range mark;
+ struct pinmux_range function;
+
+ unsigned first_gpio, last_gpio;
+
+ struct pinmux_gpio *gpios;
+ struct pinmux_cfg_reg *cfg_regs;
+ struct pinmux_data_reg *data_regs;
+
+ pinmux_enum_t *gpio_data;
+ unsigned int gpio_data_size;
+
+ unsigned long *gpio_in_use;
+ struct gpio_chip chip;
+};
+
+int register_pinmux(struct pinmux_info *pip);
+
+#endif /* __SH_PFC_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 0482229c07d..d2ae79e21be 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -43,6 +43,7 @@ enum tick_nohz_mode {
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
* @sleep_length: Duration of the current idle sleep
+ * @do_timer_last: CPU was the last one doing do_timer before going idle
*/
struct tick_sched {
struct hrtimer sched_timer;
@@ -64,6 +65,7 @@ struct tick_sched {
unsigned long last_jiffies;
unsigned long next_jiffies;
ktime_t idle_expires;
+ int do_timer_last;
};
extern void __init tick_init(void);
@@ -98,6 +100,9 @@ extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
extern int tick_oneshot_mode_active(void);
+# ifndef arch_needs_cpu
+# define arch_needs_cpu(cpu) (0)
+# endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
diff --git a/include/linux/time.h b/include/linux/time.h
index fe04e5ef6a5..6e026e45a17 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -148,6 +148,7 @@ extern void monotonic_to_bootbased(struct timespec *ts);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
+extern u64 timekeeping_max_deferment(void);
extern void update_wall_time(void);
extern void update_xtime_cache(u64 nsec);
extern void timekeeping_leap_insert(int leapsecond);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index e6967d10d9e..94f8faecdcb 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -115,13 +115,16 @@ struct timex {
#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */
#endif
-/* xntp 3.4 compatibility names */
+/* NTP userland likes the MOD_ prefix better */
#define MOD_OFFSET ADJ_OFFSET
#define MOD_FREQUENCY ADJ_FREQUENCY
#define MOD_MAXERROR ADJ_MAXERROR
#define MOD_ESTERROR ADJ_ESTERROR
#define MOD_STATUS ADJ_STATUS
#define MOD_TIMECONST ADJ_TIMECONST
+#define MOD_TAI ADJ_TAI
+#define MOD_MICRO ADJ_MICRO
+#define MOD_NANO ADJ_NANO
/*
@@ -261,11 +264,7 @@ static inline int ntp_synced(void)
#define NTP_SCALE_SHIFT 32
-#ifdef CONFIG_NO_HZ
-#define NTP_INTERVAL_FREQ (2)
-#else
#define NTP_INTERVAL_FREQ (HZ)
-#endif
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
diff --git a/init/calibrate.c b/init/calibrate.c
index a379c906119..6eb48e53d61 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -123,23 +123,26 @@ void __cpuinit calibrate_delay(void)
{
unsigned long ticks, loopbit;
int lps_precision = LPS_PREC;
+ static bool printed;
if (preset_lpj) {
loops_per_jiffy = preset_lpj;
- printk(KERN_INFO
- "Calibrating delay loop (skipped) preset value.. ");
- } else if ((smp_processor_id() == 0) && lpj_fine) {
+ if (!printed)
+ pr_info("Calibrating delay loop (skipped) "
+ "preset value.. ");
+ } else if ((!printed) && lpj_fine) {
loops_per_jiffy = lpj_fine;
- printk(KERN_INFO
- "Calibrating delay loop (skipped), "
+ pr_info("Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
- printk(KERN_INFO
- "Calibrating delay using timer specific routine.. ");
+ if (!printed)
+ pr_info("Calibrating delay using timer "
+ "specific routine.. ");
} else {
loops_per_jiffy = (1<<12);
- printk(KERN_INFO "Calibrating delay loop... ");
+ if (!printed)
+ pr_info("Calibrating delay loop... ");
while ((loops_per_jiffy <<= 1) != 0) {
/* wait for "start of" clock tick */
ticks = jiffies;
@@ -170,7 +173,10 @@ void __cpuinit calibrate_delay(void)
loops_per_jiffy &= ~loopbit;
}
}
- printk(KERN_CONT "%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ if (!printed)
+ pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+
+ printed = true;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ba0f1ecb21..7c4e2713df0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -392,10 +392,9 @@ int disable_nonboot_cpus(void)
if (cpu == first_cpu)
continue;
error = _cpu_down(cpu, 1);
- if (!error) {
+ if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
- printk("CPU%d is down\n", cpu);
- } else {
+ else {
printk(KERN_ERR "Error taking CPU%d down: %d\n",
cpu, error);
break;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 3e1c36e7998..ede52770812 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,7 +1238,8 @@ hrtimer_interrupt_hanging(struct clock_event_device *dev,
force_clock_reprogram = 1;
dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
printk(KERN_WARNING "hrtimer: interrupt too slow, "
- "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+ "forcing clock min delta to %llu ns\n",
+ (unsigned long long) dev->min_delta_ns);
}
/*
* High resolution timer interrupt
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bde4c667d24..7305b297d1e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1067,7 +1067,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
kfree(action);
#ifdef CONFIG_DEBUG_SHIRQ
- if (irqflags & IRQF_SHARED) {
+ if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
diff --git a/kernel/itimer.c b/kernel/itimer.c
index b03451ede52..d802883153d 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -146,6 +146,7 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
{
cputime_t cval, nval, cinterval, ninterval;
s64 ns_ninterval, ns_nval;
+ u32 error, incr_error;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
nval = timeval_to_cputime(&value->it_value);
@@ -153,8 +154,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
ninterval = timeval_to_cputime(&value->it_interval);
ns_ninterval = timeval_to_ns(&value->it_interval);
- it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
- it->error = cputime_sub_ns(nval, ns_nval);
+ error = cputime_sub_ns(nval, ns_nval);
+ incr_error = cputime_sub_ns(ninterval, ns_ninterval);
spin_lock_irq(&tsk->sighand->siglock);
@@ -168,6 +169,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
}
it->expires = nval;
it->incr = ninterval;
+ it->error = error;
+ it->incr_error = incr_error;
trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index dfdec524d1b..3db49b9ca37 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -29,7 +29,6 @@
#include <linux/pm_qos_params.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
@@ -344,37 +343,33 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-#define PID_NAME_LEN sizeof("process_1234567890")
-static char name[PID_NAME_LEN];
+#define PID_NAME_LEN 32
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
int ret;
long pm_qos_class;
+ char name[PID_NAME_LEN];
- lock_kernel();
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
filp->private_data = (void *)pm_qos_class;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
ret = pm_qos_add_requirement(pm_qos_class, name,
PM_QOS_DEFAULT_VALUE);
- if (ret >= 0) {
- unlock_kernel();
+ if (ret >= 0)
return 0;
- }
}
- unlock_kernel();
-
return -EPERM;
}
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
int pm_qos_class;
+ char name[PID_NAME_LEN];
pm_qos_class = (long)filp->private_data;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
pm_qos_remove_requirement(pm_qos_class, name);
return 0;
@@ -385,13 +380,14 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
{
s32 value;
int pm_qos_class;
+ char name[PID_NAME_LEN];
pm_qos_class = (long)filp->private_data;
if (count != sizeof(s32))
return -EINVAL;
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
- sprintf(name, "process_%d", current->pid);
+ snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
pm_qos_update_requirement(pm_qos_class, name, value);
return sizeof(s32);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 5c9dc228747..438ff452351 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -384,7 +384,8 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
- * This is called from sys_timer_create with the new timer already locked.
+ * This is called from sys_timer_create() and do_cpu_nanosleep() with the
+ * new timer already zero-initialized.
*/
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
@@ -396,8 +397,6 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
return -EINVAL;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
- new_timer->it.cpu.incr.sched = 0;
- new_timer->it.cpu.expires.sched = 0;
read_lock(&tasklist_lock);
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
diff --git a/kernel/sys.c b/kernel/sys.c
index 9968c5fb55b..585d6cd1004 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
-#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
@@ -349,6 +348,9 @@ void kernel_power_off(void)
machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
+
+static DEFINE_MUTEX(reboot_mutex);
+
/*
* Reboot system call: for obvious reasons only root may call it,
* and even root needs to set up some magic numbers in the registers
@@ -381,7 +383,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
cmd = LINUX_REBOOT_CMD_HALT;
- lock_kernel();
+ mutex_lock(&reboot_mutex);
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
kernel_restart(NULL);
@@ -397,20 +399,18 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
case LINUX_REBOOT_CMD_HALT:
kernel_halt();
- unlock_kernel();
do_exit(0);
panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
kernel_power_off();
- unlock_kernel();
do_exit(0);
break;
case LINUX_REBOOT_CMD_RESTART2:
if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
- unlock_kernel();
- return -EFAULT;
+ ret = -EFAULT;
+ break;
}
buffer[sizeof(buffer) - 1] = '\0';
@@ -433,7 +433,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
ret = -EINVAL;
break;
}
- unlock_kernel();
+ mutex_unlock(&reboot_mutex);
return ret;
}
diff --git a/kernel/time.c b/kernel/time.c
index 804798005d1..c6324d96009 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
- update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 620b58abdc3..20a8920029e 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -20,6 +20,8 @@
#include <linux/sysdev.h>
#include <linux/tick.h>
+#include "tick-internal.h"
+
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
@@ -37,10 +39,9 @@ static DEFINE_SPINLOCK(clockevents_lock);
*
* Math helper, returns latch value converted to nanoseconds (bound checked)
*/
-unsigned long clockevent_delta2ns(unsigned long latch,
- struct clock_event_device *evt)
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
- u64 clc = ((u64) latch << evt->shift);
+ u64 clc = (u64) latch << evt->shift;
if (unlikely(!evt->mult)) {
evt->mult = 1;
@@ -50,10 +51,10 @@ unsigned long clockevent_delta2ns(unsigned long latch,
do_div(clc, evt->mult);
if (clc < 1000)
clc = 1000;
- if (clc > LONG_MAX)
- clc = LONG_MAX;
+ if (clc > KTIME_MAX)
+ clc = KTIME_MAX;
- return (unsigned long) clc;
+ return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 4a310906b3e..d422c7b2236 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -107,6 +107,59 @@ u64 timecounter_cyc2time(struct timecounter *tc,
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);
+/**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult: pointer to mult variable
+ * @shift: pointer to shift variable
+ * @from: frequency to convert from
+ * @to: frequency to convert to
+ * @minsec: guaranteed runtime conversion range in seconds
+ *
+ * The function evaluates the shift/mult pair for the scaled math
+ * operations of clocksources and clockevents.
+ *
+ * @to and @from are frequency values in HZ. For clock sources @to is
+ * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
+ * events @to is the counter frequency and @from is NSEC_PER_SEC.
+ *
+ * The @minsec conversion range argument controls the time frame in
+ * seconds which must be covered by the runtime conversion with the
+ * calculated mult and shift factors. This guarantees that no 64bit
+ * overflow happens when the input value of the conversion is
+ * multiplied with the calculated mult factor. Larger ranges may
+ * reduce the conversion accuracy by choosing smaller mult and shift
+ * factors.
+ */
+void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+{
+ u64 tmp;
+ u32 sft, sftacc = 32;
+
+ /*
+ * Calculate the shift factor which is limiting the conversion
+ * range:
+ */
+ tmp = ((u64)minsec * from) >> 32;
+ while (tmp) {
+ tmp >>= 1;
+ sftacc--;
+ }
+
+ /*
+ * Find the conversion shift/mult pair which has the best
+ * accuracy and fits the minsec conversion range:
+ */
+ for (sft = 32; sft > 0; sft--) {
+ tmp = (u64) to << sft;
+ do_div(tmp, from);
+ if ((tmp >> sftacc) == 0)
+ break;
+ }
+ *mult = tmp;
+ *shift = sft;
+}
+
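
To make the math above concrete, here is a small stand-alone user-space re-derivation for a hypothetical 1 MHz clocksource converted to nanoseconds with a 600 second guaranteed range; the figures are illustrative only:

#include <stdint.h>
#include <stdio.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t minsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* same algorithm as clocks_calc_mult_shift() above */
	tmp = ((uint64_t)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}
	for (sft = 32; sft > 0; sft--) {
		tmp = ((uint64_t)to << sft) / from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = (uint32_t)tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	calc_mult_shift(&mult, &shift, 1000000, 1000000000, 600);
	/* prints mult=4194304000 shift=22 -> 1000000 ns for 1000 cycles */
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)1000 * mult) >> shift));
	return 0;
}

With those factors, cyc2ns is just (cycles * mult) >> shift, which is the fast path the clocksource and clockevent code relies on.
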
/*[Clocksource internal variables]---------
* curr_clocksource:
* currently selected clocksource.
@@ -413,6 +466,47 @@ void clocksource_touch_watchdog(void)
clocksource_resume_watchdog();
}
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs: Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+ u64 max_nsecs, max_cycles;
+
+ /*
+ * Calculate the maximum number of cycles that we can pass to the
+ * cyc2ns function without overflowing a 64-bit signed result. The
+ * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
+ * is equivalent to the below.
+ * max_cycles < (2^63)/cs->mult
+ * max_cycles < 2^(log2((2^63)/cs->mult))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult))
+ * max_cycles < 2^(63 - log2(cs->mult))
+ * max_cycles < 1 << (63 - log2(cs->mult))
+ * Please note that we add 1 to the result of the log2 to account for
+ * any rounding errors, ensure the above inequality is satisfied and
+ * no overflow will occur.
+ */
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+
+ /*
+ * The actual maximum number of cycles we can defer the clocksource is
+ * determined by the minimum of max_cycles and cs->mask.
+ */
+ max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+
+ /*
+ * To ensure that the clocksource does not wrap whilst we are idle,
+ * limit the time the clocksource can be deferred by 12.5%. Please
+ * note a margin of 12.5% is used because this can be computed with
+ * a shift, versus say 10% which would require division.
+ */
+ return max_nsecs - (max_nsecs >> 5);
+}
+
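
Continuing the 1 MHz example from above (mult = 4194304000, shift = 22, 32-bit counter mask), a quick stand-alone check of the bound this function computes; the numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 4194304000u, shift = 22;
	uint64_t mask = 0xffffffffull;
	/* 63 - (ilog2(mult) + 1) = 63 - 32 = 31 */
	uint64_t max_cycles = 1ull << 31;
	uint64_t max_nsecs;

	if (max_cycles > mask)
		max_cycles = mask;

	max_nsecs = (max_cycles * mult) >> shift;	/* ~2147 s */
	max_nsecs -= max_nsecs >> 5;			/* 12.5% safety margin */

	printf("max idle ~ %llu s\n",
	       (unsigned long long)(max_nsecs / 1000000000ull));
	return 0;
}

So a 32-bit counter running at 1 MHz allows the tick to be deferred for roughly 2080 seconds (about 35 minutes) before the clocksource risks wrapping.
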
#ifdef CONFIG_GENERIC_TIME
/**
@@ -511,6 +605,9 @@ static void clocksource_enqueue(struct clocksource *cs)
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max idle time permitted for this clocksource */
+ cs->max_idle_ns = clocksource_max_deferment(cs);
+
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_select();
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index a96c0e2b89c..0a8a213016f 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -50,9 +50,9 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
dev->min_delta_ns += dev->min_delta_ns >> 1;
printk(KERN_WARNING
- "CE: %s increasing min_delta_ns to %lu nsec\n",
+ "CE: %s increasing min_delta_ns to %llu nsec\n",
dev->name ? dev->name : "?",
- dev->min_delta_ns << 1);
+ (unsigned long long) dev->min_delta_ns << 1);
i = 0;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 89aed5933ed..f992762d7f5 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz);
* value. We do this unconditionally on any cpu, as we don't know whether the
* cpu, which has the update task assigned is in a long sleep.
*/
-static void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(ktime_t now)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- ktime_t now;
-
- if (!ts->tick_stopped)
- return;
cpumask_clear_cpu(cpu, nohz_cpu_mask);
- now = ktime_get();
ts->idle_waketime = now;
local_irq_save(flags);
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(void)
touch_softlockup_watchdog();
}
-static void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t delta;
- if (ts->idle_active) {
- ktime_t now, delta;
- now = ktime_get();
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_lastupdate = now;
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- ts->idle_active = 0;
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_active = 0;
- sched_clock_idle_wakeup_event(0);
- }
+ sched_clock_idle_wakeup_event(0);
}
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -216,6 +208,7 @@ void tick_nohz_stop_sched_tick(int inidle)
struct tick_sched *ts;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ u64 time_delta;
int cpu;
local_irq_save(flags);
@@ -263,7 +256,7 @@ void tick_nohz_stop_sched_tick(int inidle)
if (ratelimit < 10) {
printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- local_softirq_pending());
+ (unsigned int) local_softirq_pending());
ratelimit++;
}
goto end;
@@ -275,14 +268,18 @@ void tick_nohz_stop_sched_tick(int inidle)
seq = read_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
+ time_delta = timekeeping_max_deferment();
} while (read_seqretry(&xtime_lock, seq));
- /* Get the next timer wheel timer */
- next_jiffies = get_next_timer_interrupt(last_jiffies);
- delta_jiffies = next_jiffies - last_jiffies;
-
- if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
+ if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+ arch_needs_cpu(cpu)) {
+ next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
+ } else {
+ /* Get the next timer wheel timer */
+ next_jiffies = get_next_timer_interrupt(last_jiffies);
+ delta_jiffies = next_jiffies - last_jiffies;
+ }
/*
* Do not stop the tick, if we are only one off
* or if the cpu is required for rcu
@@ -294,22 +291,51 @@ void tick_nohz_stop_sched_tick(int inidle)
if ((long)delta_jiffies >= 1) {
/*
- * calculate the expiry time for the next timer wheel
- * timer
- */
- expires = ktime_add_ns(last_update, tick_period.tv64 *
- delta_jiffies);
-
- /*
* If this cpu is the one which updates jiffies, then
* give up the assignment and let it be taken by the
* cpu which runs the tick timer next, which might be
* this cpu as well. If we don't drop this here the
* jiffies might be stale and do_timer() never
- * invoked.
+ * invoked. Keep track of the fact that it was the one
+ * which had the do_timer() duty last. If this cpu is
+ * the one which had the do_timer() duty last, we
+ * limit the sleep time to the timekeeping
+ * max_deferment value which we retrieved
+ * above. Otherwise we can sleep as long as we want.
*/
- if (cpu == tick_do_timer_cpu)
+ if (cpu == tick_do_timer_cpu) {
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ ts->do_timer_last = 1;
+ } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+ time_delta = KTIME_MAX;
+ ts->do_timer_last = 0;
+ } else if (!ts->do_timer_last) {
+ time_delta = KTIME_MAX;
+ }
+
+ /*
+ * calculate the expiry time for the next timer wheel
+ * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
+ * that there is no timer pending or at least extremely
+ * far into the future (12 days for HZ=1000). In this
+ * case we set the expiry to the end of time.
+ */
+ if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
+ /*
+ * Calculate the time delta for the next timer event.
+ * If the time delta exceeds the maximum time delta
+ * permitted by the current clocksource then adjust
+ * the time delta accordingly to ensure the
+ * clocksource does not wrap.
+ */
+ time_delta = min_t(u64, time_delta,
+ tick_period.tv64 * delta_jiffies);
+ }
+
+ if (time_delta < KTIME_MAX)
+ expires = ktime_add_ns(last_update, time_delta);
+ else
+ expires.tv64 = KTIME_MAX;
if (delta_jiffies > 1)
cpumask_set_cpu(cpu, nohz_cpu_mask);
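
The hunk above changes how the wakeup time is chosen: time_delta starts as the clocksource's maximum deferment, is relaxed to KTIME_MAX when this cpu does not carry the do_timer() duty, and is then capped by the next timer wheel event. A stand-alone sketch of that clamping; the names and numbers are illustrative, not taken from the kernel:

/* Sketch of the sleep-length clamping with made-up values. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_KTIME_MAX	INT64_MAX

int main(void)
{
	uint64_t max_deferment_ns = 2000000000ULL;	/* clocksource limit */
	uint64_t tick_period_ns = 1000000ULL;		/* HZ=1000 */
	uint64_t delta_jiffies = 500;			/* next timer in 500 ticks */
	int has_do_timer_duty = 1;			/* pretend we update jiffies */
	uint64_t time_delta;

	/* only the jiffies-updating cpu is bound by the clocksource limit */
	time_delta = has_do_timer_duty ? max_deferment_ns : SKETCH_KTIME_MAX;

	/* the next timer wheel event bounds the sleep further */
	if (tick_period_ns * delta_jiffies < time_delta)
		time_delta = tick_period_ns * delta_jiffies;

	printf("sleep at most %llu ns\n", (unsigned long long)time_delta);
	return 0;
}
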
@@ -342,22 +368,19 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_sleeps++;
+ /* Mark expires */
+ ts->idle_expires = expires;
+
/*
- * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
- * there is no timer pending or at least extremly far
- * into the future (12 days for HZ=1000). In this case
- * we simply stop the tick timer:
+ * If the expiration time is KTIME_MAX, we simply
+ * stop the tick timer.
*/
- if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
- ts->idle_expires.tv64 = KTIME_MAX;
+ if (unlikely(expires.tv64 == KTIME_MAX)) {
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
goto out;
}
- /* Mark expiries */
- ts->idle_expires = expires;
-
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED);
@@ -436,7 +459,11 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;
local_irq_disable();
- tick_nohz_stop_idle(cpu);
+ if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+ now = ktime_get();
+
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
if (!ts->inidle || !ts->tick_stopped) {
ts->inidle = 0;
@@ -450,7 +477,6 @@ void tick_nohz_restart_sched_tick(void)
/* Update jiffies first */
select_nohz_load_balancer(0);
- now = ktime_get();
tick_do_update_jiffies64(now);
cpumask_clear_cpu(cpu, nohz_cpu_mask);
@@ -584,22 +610,18 @@ static void tick_nohz_switch_to_nohz(void)
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
-static void tick_nohz_kick_tick(int cpu)
+static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
- ktime_t delta, now;
-
- if (!ts->tick_stopped)
- return;
+ ktime_t delta;
/*
* Do not touch the tick device, when the next expiry is either
* already reached or less/equal than the tick period.
*/
- now = ktime_get();
delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
if (delta.tv64 <= tick_period.tv64)
return;
@@ -608,9 +630,26 @@ static void tick_nohz_kick_tick(int cpu)
#endif
}
+static inline void tick_check_nohz(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now;
+
+ if (!ts->idle_active && !ts->tick_stopped)
+ return;
+ now = ktime_get();
+ if (ts->idle_active)
+ tick_nohz_stop_idle(cpu, now);
+ if (ts->tick_stopped) {
+ tick_nohz_update_jiffies(now);
+ tick_nohz_kick_tick(cpu, now);
+ }
+}
+
#else
static inline void tick_nohz_switch_to_nohz(void) { }
+static inline void tick_check_nohz(int cpu) { }
#endif /* NO_HZ */
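
tick_check_nohz() above folds what used to be three separate calls on irq entry into one helper that samples the clock once and hands the same timestamp to both the idle accounting and the jiffies update. The pattern, reduced to a user-space sketch in which clock_gettime() stands in for ktime_get() and the helper names are invented:

/* Sketch of "read the clock once, pass the timestamp down". */
#include <stdio.h>
#include <time.h>

static void account_idle_end(const struct timespec *now)
{
	printf("idle ended at %ld.%09ld\n", (long)now->tv_sec, now->tv_nsec);
}

static void update_jiffies(const struct timespec *now)
{
	printf("jiffies updated at %ld.%09ld\n", (long)now->tv_sec, now->tv_nsec);
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);	/* one clock read ... */
	account_idle_end(&now);			/* ... shared by both users */
	update_jiffies(&now);
	return 0;
}
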
@@ -620,11 +659,7 @@ static inline void tick_nohz_switch_to_nohz(void) { }
void tick_check_idle(int cpu)
{
tick_check_oneshot_broadcast(cpu);
-#ifdef CONFIG_NO_HZ
- tick_nohz_stop_idle(cpu);
- tick_nohz_update_jiffies();
- tick_nohz_kick_tick(cpu);
-#endif
+ tick_check_nohz(cpu);
}
/*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3a4e2907ea..af4135f0582 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,19 +165,12 @@ struct timespec raw_time;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
- xtime_cache = xtime;
- timespec_add_ns(&xtime_cache, nsec);
-}
-
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
xtime.tv_sec += leapsecond;
wall_to_monotonic.tv_sec -= leapsecond;
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME
@@ -332,12 +325,10 @@ int do_settimeofday(struct timespec *tv)
xtime = *tv;
- update_xtime_cache(0);
-
timekeeper.ntp_error = 0;
ntp_clear();
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -488,6 +479,17 @@ int timekeeping_valid_for_hres(void)
}
/**
+ * timekeeping_max_deferment - Returns max time timekeeping can be deferred
+ *
+ * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
+ * ensure that the clocksource does not change!
+ */
+u64 timekeeping_max_deferment(void)
+{
+ return timekeeper.clock->max_idle_ns;
+}
+
+/**
* read_persistent_clock - Return time from the persistent clock.
*
* Weak dummy function for arches that do not yet support it.
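
timekeeping_max_deferment() added above is only a read of the current clocksource's max_idle_ns; its one requirement falls on the caller, which must hold the xtime_lock read-side sequence so a concurrent clocksource change forces a retry, exactly as tick_nohz_stop_sched_tick() does in the tick-sched.c hunk. A simplified user-space sketch of that retry pattern, with an invented sequence counter standing in for the seqlock:

/* Sketch of the seqlock read pattern required of callers. */
#include <stdio.h>
#include <stdint.h>

static volatile unsigned int seq;	/* bumped by a (not shown) writer */
static uint64_t max_idle_ns = 4000000000ULL;

int main(void)
{
	unsigned int start;
	uint64_t deferment;

	do {
		start = seq;			/* read_seqbegin() analogue */
		deferment = max_idle_ns;	/* snapshot under the sequence */
	} while (start != seq);			/* read_seqretry() analogue */

	printf("max deferment: %llu ns\n", (unsigned long long)deferment);
	return 0;
}
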
@@ -548,7 +550,6 @@ void __init timekeeping_init(void)
}
set_normalized_timespec(&wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
- update_xtime_cache(0);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -582,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
total_sleep_time = timespec_add_safe(total_sleep_time, ts);
}
- update_xtime_cache(0);
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
@@ -723,6 +723,49 @@ static void timekeeping_adjust(s64 offset)
}
/**
+ * logarithmic_accumulation - shifted accumulation of cycles
+ *
+ * This function accumulates a shifted interval of cycles into
+ * a shifted interval of nanoseconds, which allows for an O(log)
+ * accumulation loop.
+ *
+ * Returns the unconsumed cycles.
+ */
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+{
+ u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+
+ /* If the offset is smaller than a shifted interval, do nothing */
+ if (offset < timekeeper.cycle_interval<<shift)
+ return offset;
+
+ /* Accumulate one shifted interval */
+ offset -= timekeeper.cycle_interval << shift;
+ timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+
+ timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+ while (timekeeper.xtime_nsec >= nsecps) {
+ timekeeper.xtime_nsec -= nsecps;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+
+ /* Accumulate into raw time */
+ raw_time.tv_nsec += timekeeper.raw_interval << shift;
+ while (raw_time.tv_nsec >= NSEC_PER_SEC) {
+ raw_time.tv_nsec -= NSEC_PER_SEC;
+ raw_time.tv_sec++;
+ }
+
+ /* Accumulate error between NTP and clock interval */
+ timekeeper.ntp_error += tick_length << shift;
+ timekeeper.ntp_error -= timekeeper.xtime_interval <<
+ (timekeeper.ntp_error_shift + shift);
+
+ return offset;
+}
+
+/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
* Called from the timer interrupt, must hold a write on xtime_lock.
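
logarithmic_accumulation() above consumes one "interval << shift" chunk per call and the caller walks shift downwards, so a long idle period is folded in with O(log) iterations rather than one iteration per missed tick. A stand-alone demo of the idea; the interval and offset are made up and plain integers stand in for cycle_t:

/* Demo of shifted accumulation, largest power-of-two chunk first. */
#include <stdio.h>
#include <stdint.h>

static uint64_t interval = 1000;	/* cycles per "tick" */

static uint64_t accumulate(uint64_t offset, int shift)
{
	if (offset < (interval << shift))	/* chunk does not fit */
		return offset;
	printf("accumulating %llu cycles (shift %d)\n",
	       (unsigned long long)(interval << shift), shift);
	return offset - (interval << shift);
}

int main(void)
{
	uint64_t offset = 123456;	/* cycles pending after a long sleep */
	int shift = 0;

	while ((interval << (shift + 1)) <= offset)
		shift++;		/* largest chunk that still fits */

	while (offset >= interval) {
		offset = accumulate(offset, shift);
		if (shift)
			shift--;
	}
	printf("left over: %llu cycles\n", (unsigned long long)offset);
	return 0;
}
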
@@ -731,7 +774,7 @@ void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
- u64 nsecs;
+ int shift = 0, maxshift;
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -745,33 +788,22 @@ void update_wall_time(void)
#endif
timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
- /* normally this loop will run just once, however in the
- * case of lost or late ticks, it will accumulate correctly.
+ /*
+ * With NO_HZ we may have to accumulate many cycle_intervals
+ * (think "ticks") worth of time at once. To do this efficiently,
+ * we calculate the largest doubling multiple of cycle_intervals
+ * that is smaller than the offset. We then accumulate that
+ * chunk in one go, and then try to consume the next smaller
+ * doubled multiple.
*/
+ shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+ shift = max(0, shift);
+ /* Bound shift to one less than what overflows tick_length */
+ maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+ shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
- u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
-
- /* accumulate one interval */
- offset -= timekeeper.cycle_interval;
- clock->cycle_last += timekeeper.cycle_interval;
-
- timekeeper.xtime_nsec += timekeeper.xtime_interval;
- if (timekeeper.xtime_nsec >= nsecps) {
- timekeeper.xtime_nsec -= nsecps;
- xtime.tv_sec++;
- second_overflow();
- }
-
- raw_time.tv_nsec += timekeeper.raw_interval;
- if (raw_time.tv_nsec >= NSEC_PER_SEC) {
- raw_time.tv_nsec -= NSEC_PER_SEC;
- raw_time.tv_sec++;
- }
-
- /* accumulate error between NTP and clock interval */
- timekeeper.ntp_error += tick_length;
- timekeeper.ntp_error -= timekeeper.xtime_interval <<
- timekeeper.ntp_error_shift;
+ offset = logarithmic_accumulation(offset, shift);
+ shift--;
}
/* correct the clock when NTP error is too big */
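
The shift chosen above is the log2 of how many cycle_intervals fit into the pending offset, bounded so that tick_length << shift cannot overflow 64 bits. A sketch of the same computation with made-up numbers; ilog2_u64() here is a simple helper, not the kernel's ilog2():

/* Sketch of the shift/maxshift bound with illustrative values. */
#include <stdio.h>
#include <stdint.h>

static int ilog2_u64(uint64_t v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint64_t cycle_interval = 1000;			/* cycles per tick */
	uint64_t offset = 1000000;			/* ~1000 ticks pending */
	uint64_t tick_length = 1000000ULL << 32;	/* shifted ns per tick */
	int shift, maxshift;

	shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);
	if (shift < 0)
		shift = 0;
	/* one less than the shift that would overflow tick_length */
	maxshift = (int)(8 * sizeof(tick_length) - (ilog2_u64(tick_length) + 1)) - 1;
	if (shift > maxshift)
		shift = maxshift;

	printf("start with chunks of %llu cycles (shift %d, maxshift %d)\n",
	       (unsigned long long)(cycle_interval << shift), shift, maxshift);
	return 0;
}
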
@@ -807,11 +839,8 @@ void update_wall_time(void)
timekeeper.ntp_error += timekeeper.xtime_nsec <<
timekeeper.ntp_error_shift;
- nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
- update_xtime_cache(nsecs);
-
/* check to see if there is a new clocksource to use */
- update_vsyscall(&xtime, timekeeper.clock);
+ update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
/**
@@ -846,13 +875,13 @@ void monotonic_to_bootbased(struct timespec *ts)
unsigned long get_seconds(void)
{
- return xtime_cache.tv_sec;
+ return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return xtime_cache;
+ return xtime;
}
struct timespec current_kernel_time(void)
@@ -862,8 +891,7 @@ struct timespec current_kernel_time(void)
do {
seq = read_seqbegin(&xtime_lock);
-
- now = xtime_cache;
+ now = xtime;
} while (read_seqretry(&xtime_lock, seq));
return now;
@@ -877,8 +905,7 @@ struct timespec get_monotonic_coarse(void)
do {
seq = read_seqbegin(&xtime_lock);
-
- now = xtime_cache;
+ now = xtime;
mono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1b5b7aa2fdf..665c76edbf1 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -204,10 +204,12 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
return;
}
SEQ_printf(m, "%s\n", dev->name);
- SEQ_printf(m, " max_delta_ns: %lu\n", dev->max_delta_ns);
- SEQ_printf(m, " min_delta_ns: %lu\n", dev->min_delta_ns);
- SEQ_printf(m, " mult: %lu\n", dev->mult);
- SEQ_printf(m, " shift: %d\n", dev->shift);
+ SEQ_printf(m, " max_delta_ns: %llu\n",
+ (unsigned long long) dev->max_delta_ns);
+ SEQ_printf(m, " min_delta_ns: %llu\n",
+ (unsigned long long) dev->min_delta_ns);
+ SEQ_printf(m, " mult: %u\n", dev->mult);
+ SEQ_printf(m, " shift: %u\n", dev->shift);
SEQ_printf(m, " mode: %d\n", dev->mode);
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));